import math


def is_prime(number: int) -> bool:
    """Checks whether `number` is prime in O(sqrt(n)) time."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value: int, factor: int = 1, **kwargs) -> int:
    """Returns the first prime found scanning from factor * value."""
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
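
# A minimal usage sketch for the functions above (names as reconstructed here).
# next_prime scans upward from factor * value, or downward when desc=True; if
# the starting point is itself prime it restarts one past it.
if __name__ == "__main__":
    assert is_prime(17) and not is_prime(15)
    assert next_prime(14) == 17  # 15 and 16 are composite
    assert next_prime(20, desc=True) == 19  # scan downward instead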
# =============================================================================
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _UpperCAmelCase ( lowercase_ , unittest.TestCase ):
UpperCamelCase = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :Union[str, Any]=0 ):
A = floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(__UpperCamelCase ) )
A = np.random.RandomState(__UpperCamelCase )
A = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"strength": 0.75,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def lowerCamelCase ( self :Any ):
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = self.get_dummy_inputs()
A = pipe(**__UpperCamelCase ).images
A = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 1_28, 1_28, 3)
A = np.array([0.69_643, 0.58_484, 0.50_314, 0.58_760, 0.55_368, 0.59_643, 0.51_529, 0.41_217, 0.49_087] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def lowerCamelCase ( self :Dict ):
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
A = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = self.get_dummy_inputs()
A = pipe(**__UpperCamelCase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
A = np.array([0.61_737, 0.54_642, 0.53_183, 0.54_465, 0.52_742, 0.60_525, 0.49_969, 0.40_655, 0.48_154] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCamelCase ( self :Optional[Any] ):
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
A = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
# warmup pass to apply optimizations
A = pipe(**self.get_dummy_inputs() )
A = self.get_dummy_inputs()
A = pipe(**__UpperCamelCase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
A = np.array([0.52_761, 0.59_977, 0.49_033, 0.49_619, 0.54_282, 0.50_311, 0.47_600, 0.40_918, 0.45_203] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCamelCase ( self :Dict ):
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
A = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = self.get_dummy_inputs()
A = pipe(**__UpperCamelCase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
A = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCamelCase ( self :Optional[Any] ):
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
A = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = self.get_dummy_inputs()
A = pipe(**__UpperCamelCase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
A = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCamelCase ( self :Union[str, Any] ):
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = self.get_dummy_inputs()
A = pipe(**__UpperCamelCase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
A = np.array([0.65_331, 0.58_277, 0.48_204, 0.56_059, 0.53_665, 0.56_235, 0.50_969, 0.40_009, 0.46_552] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
@property
def lowerCamelCase ( self :Optional[Any] ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCamelCase ( self :Optional[int] ):
A = ort.SessionOptions()
A = False
return options
def lowerCamelCase ( self :Dict ):
A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
A = init_image.resize((7_68, 5_12) )
# using the PNDM scheduler by default
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = "A fantasy landscape, trending on artstation"
A = np.random.RandomState(0 )
A = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=__UpperCamelCase , output_type="np" , )
A = output.images
A = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 7_68, 3)
A = np.array([0.4_909, 0.5_059, 0.5_372, 0.4_623, 0.4_876, 0.5_049, 0.4_820, 0.4_956, 0.5_019] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def lowerCamelCase ( self :Any ):
A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
A = init_image.resize((7_68, 5_12) )
A = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" )
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = "A fantasy landscape, trending on artstation"
A = np.random.RandomState(0 )
A = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=__UpperCamelCase , output_type="np" , )
A = output.images
A = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 7_68, 3)
A = np.array([0.8_043, 0.926, 0.9_581, 0.8_119, 0.8_954, 0.913, 0.7_209, 0.7_463, 0.7_431] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
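
# These fast tests run on CPU through onnxruntime; from a diffusers checkout
# (repo-relative path assumed) they can be exercised with something like:
#
#   pip install -e ".[test]" onnxruntime
#   pytest tests/pipelines/stable_diffusion/test_onnx_stable_diffusion_img2img.py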
# =============================================================================
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """Return the citation number from a Google Scholar lookup page."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
# =============================================================================
import numpy as np

# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models

if __name__ == "__main__":
    # Initialising the CNN
    # (Sequential- Building the model layer by layer)
    classifier = models.Sequential()

    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
    )

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation="relu"))
    classifier.add(layers.Dense(units=1, activation="sigmoid"))

    # Compiling the CNN
    classifier.compile(
        optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
    )

    # Part 2 - Fitting the CNN to the images

    # Load Trained model weights
    # from keras.models import load_model
    # regressor=load_model('cnn.h5')

    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )

    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)

    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    # fit_generator is deprecated in TF2; fit accepts generators directly
    classifier.fit(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )

    classifier.save("cnn.h5")

    # Part 3 - Making new predictions

    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        prediction = "Normal"
    if result[0][0] == 1:
        prediction = "Abnormality detected"
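
# Caveat (an observation about the block above, not part of the original
# script): a sigmoid output is a probability in (0, 1), so the exact equality
# checks above rarely fire. A common fix is to threshold at 0.5:
#
#   prediction = "Abnormality detected" if result[0][0] > 0.5 else "Normal"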
# =============================================================================
"""simple docstring"""
import math
def _snake_case ( lowercase__ ):
_lowerCamelCase : Any = [True] * n
_lowerCamelCase : List[Any] = False
_lowerCamelCase : Optional[int] = False
_lowerCamelCase : Optional[int] = True
for i in range(3 , int(n**0.5 + 1 ) , 2 ):
_lowerCamelCase : Union[str, Any] = i * 2
while index < n:
_lowerCamelCase : List[Any] = False
_lowerCamelCase : str = index + i
_lowerCamelCase : Any = [2]
for i in range(3 , lowercase__ , 2 ):
if is_prime[i]:
primes.append(lowercase__ )
return primes
def _snake_case ( lowercase__ = 999966663333 ):
_lowerCamelCase : Tuple = math.floor(math.sqrt(lowercase__ ) ) + 100
_lowerCamelCase : Optional[int] = prime_sieve(lowercase__ )
_lowerCamelCase : List[Any] = 0
_lowerCamelCase : Optional[int] = 0
_lowerCamelCase : Tuple = primes[prime_index]
while (last_prime**2) <= limit:
_lowerCamelCase : List[str] = primes[prime_index + 1]
_lowerCamelCase : Dict = last_prime**2
_lowerCamelCase : int = next_prime**2
# Get numbers divisible by lps(current)
_lowerCamelCase : Any = lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by ups(current)
_lowerCamelCase : str = upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
_lowerCamelCase : int = 0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
_lowerCamelCase : str = next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution()) | 96 |
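
# A brute-force cross-check for small limits (an illustrative sketch, not part
# of the original solution):
#
#   def semidivisible_sum_bruteforce(limit: int) -> int:
#       primes = prime_sieve(int(math.sqrt(limit)) + 100)
#       total = 0
#       for n in range(4, limit + 1):
#           root = math.sqrt(n)
#           lps = max(p for p in primes if p <= root)  # largest prime <= sqrt(n)
#           ups = min(p for p in primes if p >= root)  # smallest prime >= sqrt(n)
#           if (n % lps == 0) != (n % ups == 0):       # exactly one divides n
#               total += n
#       return total
#
# For a small bound this should agree with the sieved version above, e.g.
# semidivisible_sum_bruteforce(1000) == solution(1000).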
import flax.linen as nn
import jax.numpy as jnp

from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D


class FlaxCrossAttnDownBlock2D(nn.Module):
    """Cross-attention 2D downsampling block: resnets interleaved with transformers, plus an optional downsampler."""

    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()

        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxDownBlock2D(nn.Module):
    """Plain 2D downsampling block: a stack of resnets plus an optional downsampler."""

    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()

        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxCrossAttnUpBlock2D(nn.Module):
    """Cross-attention 2D upsampling block: consumes skip connections, interleaving resnets and transformers."""

    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUpBlock2D(nn.Module):
    """Plain 2D upsampling block: consumes skip connections through a stack of resnets."""

    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    """Mid block: an initial resnet followed by alternating transformer/resnet pairs."""

    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
        ]

        attentions = []

        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels,
                n_heads=self.num_attention_heads,
                d_head=self.in_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        return hidden_states
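
# A shape-level smoke test sketch (illustrative; assumes a diffusers checkout in
# which the relative imports above resolve — Flax tensors here are NHWC):
#
#   import jax
#   block = FlaxDownBlock2D(in_channels=32, out_channels=64)
#   hidden = jnp.zeros((1, 16, 16, 32))
#   temb = jnp.zeros((1, 128))
#   variables = block.init(jax.random.PRNGKey(0), hidden, temb)
#   out, skips = block.apply(variables, hidden, temb)  # out: (1, 8, 8, 64)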
# =============================================================================
"""
Project Euler Problem 25: https://projecteuler.net/problem=25

Find the index of the first Fibonacci term to contain n digits.
"""


def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2

    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))

    return index


def solution(n: int = 1000) -> int:
    """Returns the index of the first Fibonacci term to contain n digits."""
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
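
# Quick sanity check (with this file's indexing, fibonacci(12) == 144, the
# first Fibonacci term with three digits):
# >>> fibonacci(12)
# 144
# >>> fibonacci_digits_index(3)
# 12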
# =============================================================================
import collections
import inspect
import unittest

from transformers import Swinv2Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import Swinv2ForImageClassification, Swinv2ForMaskedImageModeling, Swinv2Model
    from transformers.models.swinv2.modeling_swinv2 import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class Swinv2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return Swinv2Config(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            path_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = Swinv2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = Swinv2ForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = Swinv2ForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = Swinv2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class Swinv2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (Swinv2Model, Swinv2ForImageClassification, Swinv2ForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": Swinv2Model, "image-classification": Swinv2ForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = Swinv2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Swinv2Config, embed_dim=37)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip(reason="Swinv2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = len(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            window_size_squared = config.window_size**2
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)

            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = Swinv2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )


@require_vision
@require_torch
class Swinv2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = Swinv2ForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256").to(
            torch_device
        )
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
# =============================================================================
from __future__ import annotations


def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """
    >>> boards = []
    >>> depth_first_search([], [], [], boards, 4)
    >>> for board in boards:
    ...     print(board)
    ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
    ['. . Q . ', 'Q . . . ', '. . . Q ', '. Q . . ']
    """
    # Get the next row in the current board (possible_board) to fill it with a queen
    row = len(possible_board)

    # If row is equal to the size of the board it means there is a queen in each row of
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate each column in the row to find all possible results in each row
    for col in range(n):
        # We apply what we learned previously. First we check that in the current board
        # (possible_board) there is not another queen in the same column, because if
        # there is it means there is a collision in vertical. Then we apply the two
        # formulas we learned before:
        #
        # 45º: y - x = b or 45º: row - col = b
        # 135º: y + x = b or 135º: row + col = b.
        #
        # And we verify that the results of these two formulas do not exist in their
        # variables respectively. (diagonal_right_collisions, diagonal_left_collisions)
        #
        # If any of these are True it means there is a collision so we continue to the
        # next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
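
# Worked example of the diagonal bookkeeping above: queens at (row, col) = (1, 3)
# and (3, 1) share a 135º diagonal since row + col == 4 for both, so the second
# placement is rejected; their 45º keys row - col (-2 vs. 2) are distinct, so
# the 45º diagonals do not collide.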
# =============================================================================
import inspect
import unittest

from transformers import MobileViTV2Config
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation, MobileViTV2Model
    from transformers.models.mobilevitv2.modeling_mobilevitv2 import (
        MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
        make_divisible,
    )


if is_vision_available():
    from PIL import Image

    from transformers import MobileViTImageProcessor


class MobileViTV2ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))


class MobileViTV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout_prob = ffn_dropout
        self.attn_dropout_prob = attn_dropout

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTV2Config(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout_prob,
            attn_dropout=self.attn_dropout_prob,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTV2ForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MobileViTV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTV2Model, MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation)
        if is_torch_available()
        else ()
    )

    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTV2Model,
            "image-classification": MobileViTV2ForImageClassification,
            "image-segmentation": MobileViTV2ForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTV2ModelTester(self)
        self.config_tester = MobileViTV2ConfigTester(self, config_class=MobileViTV2Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViTV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileViTV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTV2ForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTV2ForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTV2ForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
# =============================================================================
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.generation import DisjunctiveConstraint


@require_torch
class DisjunctiveConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
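
# Sketch of how these constraints are consumed in practice (the checkpoint and
# word choices below are illustrative; `constraints=` is the generate() entry
# point for constrained beam search):
#
#   from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
#   from transformers.generation import DisjunctiveConstraint
#
#   tokenizer = AutoTokenizer.from_pretrained("t5-small")
#   model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
#   word_ids = [
#       tokenizer("rain", add_special_tokens=False).input_ids,
#       tokenizer("raining", add_special_tokens=False).input_ids,
#   ]
#   inputs = tokenizer("summarize: it will rain tomorrow", return_tensors="pt")
#   out = model.generate(**inputs, constraints=[DisjunctiveConstraint(word_ids)], num_beams=4)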
# =============================================================================
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'''vocab_file''': '''spiece.model'''}
__UpperCAmelCase = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
}
}
__UpperCAmelCase = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
# Segments (not really needed)
__UpperCAmelCase = 0
__UpperCAmelCase = 1
__UpperCAmelCase = 2
__UpperCAmelCase = 3
__UpperCAmelCase = 4
class lowerCamelCase__ ( _a ):
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = '''left'''
def __init__( self : Dict , _a : List[Any] , _a : Any=False , _a : int=True , _a : Union[str, Any]=False , _a : Dict="<s>" , _a : str="</s>" , _a : Optional[int]="<unk>" , _a : Union[str, Any]="<sep>" , _a : List[Any]="<pad>" , _a : Optional[Any]="<cls>" , _a : str="<mask>" , _a : Any=["<eop>", "<eod>"] , _a : Optional[Dict[str, Any]] = None , **_a : Optional[int] , ):
# Mask token behave like a normal word, i.e. include the space before it
a__: Dict =AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
a__: Optional[int] ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_a , remove_space=_a , keep_accents=_a , bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , additional_special_tokens=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , )
a__: Dict =3
a__: Tuple =do_lower_case
a__: int =remove_space
a__: List[Any] =keep_accents
a__: List[str] =vocab_file
a__: Union[str, Any] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_a )
@property
def _lowerCamelCase ( self : Any ):
return len(self.sp_model )
def _lowerCamelCase ( self : List[Any] ):
a__: Dict ={self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Tuple ):
a__: Dict =self.__dict__.copy()
a__: List[Any] =None
return state
def __setstate__( self : Optional[Any] , _a : Tuple ):
a__: List[Any] =d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
a__: List[str] ={}
a__: int =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowerCamelCase ( self : Dict , _a : str ):
if self.remove_space:
a__: Optional[int] =" ".join(inputs.strip().split() )
else:
a__: Optional[int] =inputs
a__: Dict =outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
a__: Optional[int] =unicodedata.normalize("NFKD" , _a )
a__: int ="".join([c for c in outputs if not unicodedata.combining(_a )] )
if self.do_lower_case:
a__: Dict =outputs.lower()
return outputs
def _lowerCamelCase ( self : List[Any] , _a : str ):
a__: Dict =self.preprocess_text(_a )
a__: Dict =self.sp_model.encode(_a , out_type=_a )
a__: str =[]
for piece in pieces:
if len(_a ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
a__: Optional[Any] =self.sp_model.EncodeAsPieces(piece[:-1].replace(_a , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
a__: Optional[int] =cur_pieces[1:]
else:
a__: Tuple =cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_a )
else:
new_pieces.append(_a )
return new_pieces
def _lowerCamelCase ( self : Dict , _a : Dict ):
return self.sp_model.PieceToId(_a )
def _lowerCamelCase ( self : Dict , _a : Optional[Any] ):
return self.sp_model.IdToPiece(_a )
def _lowerCamelCase ( self : Optional[Any] , _a : Tuple ):
a__: Tuple ="".join(_a ).replace(_a , " " ).strip()
return out_string
def _lowerCamelCase ( self : Optional[int] , _a : List[int] , _a : bool = False , _a : bool = None , _a : bool = True , **_a : Union[str, Any] , ):
a__: Optional[int] =kwargs.pop("use_source_tokenizer" , _a )
a__: Any =self.convert_ids_to_tokens(_a , skip_special_tokens=_a )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
a__: List[str] =[]
a__: Any =[]
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_a ) )
a__: List[str] =[]
sub_texts.append(_a )
else:
current_sub_text.append(_a )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_a ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
a__: Union[str, Any] ="".join(_a )
a__: List[Any] =(
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
a__: Optional[int] =self.clean_up_tokenization(_a )
return clean_text
else:
return text
def _lowerCamelCase ( self : Tuple , _a : List[int] , _a : Optional[List[int]] = None ):
a__: Dict =[self.sep_token_id]
a__: Optional[Any] =[self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _lowerCamelCase ( self : Dict , _a : List[int] , _a : Optional[List[int]] = None , _a : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
if token_ids_a is not None:
return ([0] * len(_a )) + [1] + ([0] * len(_a )) + [1, 1]
return ([0] * len(_a )) + [1, 1]
def _lowerCamelCase ( self : Dict , _a : List[int] , _a : Optional[List[int]] = None ):
a__: Any =[self.sep_token_id]
a__: List[Any] =[2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _lowerCamelCase ( self : List[str] , _a : str , _a : Optional[str] = None ):
if not os.path.isdir(_a ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
a__: List[Any] =os.path.join(
_a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _a )
elif not os.path.isfile(self.vocab_file ):
with open(_a , "wb" ) as fi:
a__: Optional[Any] =self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
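# Illustrative sketch (not part of the tokenizer above): XLNet-style tokenizers place
# the separator and classifier tokens at the *end* of the sequence, unlike BERT.
# The ids below are hypothetical, chosen only to make the layout visible.
def build_inputs_example():
    sep_id, cls_id = 4, 3  # hypothetical ids
    token_ids_0 = [10, 11, 12]
    token_ids_1 = [20, 21]
    single = token_ids_0 + [sep_id] + [cls_id]  # A <sep> <cls>
    pair = token_ids_0 + [sep_id] + token_ids_1 + [sep_id] + [cls_id]  # A <sep> B <sep> <cls>
    return single, pair
# build_inputs_example() -> ([10, 11, 12, 4, 3], [10, 11, 12, 4, 20, 21, 4, 3])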
| 42 | 0 |
'''simple docstring'''
class Node:
    def __init__(self, val: int) -> None:
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val: int) -> None:
        # Standard BST insert: smaller keys go left, larger keys go right.
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root: Node, res: list) -> None:
    # Recursive in-order traversal: left subtree, node, right subtree.
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr: list) -> list:
    # Build BST
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
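# Quick sanity sketch: building the BST from [10, 1, 3, 2, 9, 14, 13] and reading
# it back in order yields the sorted sequence, since an in-order traversal of a
# BST visits keys in ascending order.
# tree_sort([10, 1, 3, 2, 9, 14, 13]) -> [1, 2, 3, 9, 10, 13, 14]
# Note: insertion is O(n^2) in the worst case (already-sorted input degenerates
# the tree into a linked list); a self-balancing tree would keep it O(n log n).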
| 4 |
'''simple docstring'''
import os
from typing import Dict, List, Tuple, TypeVar, Union
__lowerCAmelCase = TypeVar('T')
__lowerCAmelCase = Union[List[T], Tuple[T, ...]]
__lowerCAmelCase = Union[T, List[T], Dict[str, T]]
__lowerCAmelCase = Union[str, bytes, os.PathLike]
| 341 | 0 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """Export a PyTorch BertModel as a TensorFlow 1.x checkpoint, renaming parameters
    (and transposing dense/attention kernels) on the way."""
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
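# Worked example of the renaming above (a sketch, not executed by the script):
# applying `to_tf_var_name` to a typical PyTorch key walks the `var_map` patterns
# in order, so
#   "encoder.layer.0.attention.self.query.weight"
# becomes
#   "bert/encoder/layer_0/attention/self/query/kernel"
# and, because "attention.self.query" appears in `tensors_to_transpose`, the
# tensor is transposed before being written to the TF variable.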
| 355 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class KarrasVePipeline(DiffusionPipeline):
    """Unconditional image generation with the stochastic sampler from Karras et al.;
    the numbered step comments below follow the paper's sampling algorithm."""

    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler) -> None:
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0
            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)
            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample
            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)
            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample
        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
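# Minimal usage sketch, assuming a checkpoint trained for this pipeline exists at
# the hypothetical id "some/karras-ve-checkpoint" (not a verified model id):
#
#   import torch
#   pipe = KarrasVePipeline.from_pretrained("some/karras-ve-checkpoint")
#   image = pipe(batch_size=1, num_inference_steps=50,
#                generator=torch.manual_seed(0)).images[0]
#   image.save("sample.png")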
| 65 | 0 |
'''simple docstring'''
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
logger = logging.get_logger(__name__)


class ParallelBackendConfig:
    backend_name = None


@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Dispatch a map over `iterable` to a multiprocessing Pool or the configured joblib backend."""
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
        )

    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)


def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselves (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}"
        )

    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
    )
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")
    return mapped


def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    # progress bar is not yet supported for _map_with_joblib, because tqdm couldn't accurately be applied to joblib,
    # and it requires monkey-patching joblib internal classes which is subject to change
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
        )


@experimental
@contextlib.contextmanager
def parallel_backend(backend_name: str):
    """Context manager that routes `parallel_map` through the given joblib backend."""
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()

    # TODO: call create_cache_and_write_probe if "download" in steps
    # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
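# Worked example of the contiguous split above: 10 items over 3 processes gives
# div = 3, mod = 1, so the slices are
#   index 0: start = 0, end = 4   -> items[0:4]  (4 items, since 0 < mod)
#   index 1: start = 4, end = 7   -> items[4:7]  (3 items)
#   index 2: start = 7, end = 10  -> items[7:10] (3 items)
# which covers all 10 items with no gaps or overlaps.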
| 27 |
def interpolation_search(sorted_collection, item) -> int:
    """Search `item` in an ascending `sorted_collection`; return its index or None."""
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Pure-recursion variant; `left` and `right` carry the current search window."""
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        # the item, if present, lies between the probe and the old left bound
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        # the item, if present, lies between the old right bound and the probe
        return interpolation_search_by_recursion(sorted_collection, item, right, point)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection, item, left, point - 1
            )
        else:
            return interpolation_search_by_recursion(
                sorted_collection, item, point + 1, right
            )
def __assert_sorted(collection) -> bool:
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True
if __name__ == "__main__":
import sys
    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    try:
        __assert_sorted(collection)
    except ValueError:
        sys.exit("Sequence must be ascending sorted to apply interpolation search")
    target = 67
    result = interpolation_search(collection, target)
if result is not None:
print(F"""{target} found at positions: {result}""")
else:
print("""Not found""")
| 73 | 0 |
import sys
A : Union[str, Any] = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def solution(n: str = N) -> int:
    """Return the largest product of 13 adjacent digits in the 1000-digit number."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(F'{solution() = }')
| 146 |
def naive_pattern_search(s: str, pattern: str) -> list:
    """Return every index in `s` at which `pattern` begins."""
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
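# The scan above is O(len(s) * len(pattern)) in the worst case, e.g. searching
# for "aaab" in "aaaaaaaa" re-compares the same characters at every offset.
# naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC") -> [4, 10, 18]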
| 146 | 1 |
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Measure a single qubit on the Aer simulator and return the counts."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1_000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
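# Expected shape of the result (a sketch; exact dict ordering may vary): the
# circuit above applies no gates, so a qubit left in |0> puts all shots in "0":
#   single_qubit_measure(1, 1)  ->  {'0': 1000}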
if __name__ == "__main__":
print(f"Total count for various states are: {single_qubit_measure(1, 1)}") | 232 |
import argparse
import datetime
def zeller(date_input: str) -> str:
    """Return the day of the week for a mm-dd-yyyy date via Zeller's congruence."""
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")
    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")
    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")
    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")
    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")
    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8_500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?")
    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))
    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.3_9)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math
    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")
    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response
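# Worked example of the congruence above for "01-01-2010" (a Friday): m=1 rolls
# back to m=13, y=2009, so c=20, k=9, t=int(2.6*13 - 5.39)=28, u=5, v=2, x=10,
# z=45, w=45 - 2*20=5, and f=5 % 7=5 -> days["5"] == "Friday".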
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser(
        description=(
            "Find out what day of the week nearly any date is or was. Enter "
            "date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
        )
    )
    parser.add_argument(
        "date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
    )
    args = parser.parse_args()
    zeller(args.date_input)
| 232 | 1 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = """https://openaipublic.azureedge.net/jukebox/models/"""
__UpperCAmelCase = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def snake_case_ (__A : Tuple ) -> List[str]:
if key.endswith(""".model.1.bias""" ) and len(key.split(""".""" ) ) > 1_0:
__lowerCAmelCase : List[Any] = key.replace(""".model.1.bias""" , """.conv1d_1.bias""" )
elif key.endswith(""".model.1.weight""" ) and len(key.split(""".""" ) ) > 1_0:
__lowerCAmelCase : int = key.replace(""".model.1.weight""" , """.conv1d_1.weight""" )
elif key.endswith(""".model.3.bias""" ) and len(key.split(""".""" ) ) > 1_0:
__lowerCAmelCase : Any = key.replace(""".model.3.bias""" , """.conv1d_2.bias""" )
elif key.endswith(""".model.3.weight""" ) and len(key.split(""".""" ) ) > 1_0:
__lowerCAmelCase : str = key.replace(""".model.3.weight""" , """.conv1d_2.weight""" )
if "conditioner_blocks.0." in key:
__lowerCAmelCase : Optional[Any] = key.replace("""conditioner_blocks.0""" , """conditioner_blocks""" )
if "prime_prior" in key:
__lowerCAmelCase : Any = key.replace("""prime_prior""" , """encoder""" )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
__lowerCAmelCase : Any = key.replace(""".emb.""" , """.""" )
if key.endswith("""k""" ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace(""".k""" , """.codebook""" )
if "y_emb." in key:
return key.replace("""y_emb.""" , """metadata_embedding.""" )
if "x_emb.emb." in key:
__lowerCAmelCase : List[Any] = key.replace("""0.x_emb.emb""" , """embed_tokens""" )
if "prime_state_ln" in key:
return key.replace("""prime_state_ln""" , """encoder.final_layer_norm""" )
if ".ln" in key:
return key.replace(""".ln""" , """.layer_norm""" )
if "_ln" in key:
return key.replace("""_ln""" , """_layer_norm""" )
if "prime_state_proj" in key:
return key.replace("""prime_state_proj""" , """encoder.proj_in""" )
if "prime_x_out" in key:
return key.replace("""prime_x_out""" , """encoder.lm_head""" )
if "prior.x_out" in key:
return key.replace("""x_out""" , """fc_proj_out""" )
if "x_emb" in key:
return key.replace("""x_emb""" , """embed_tokens""" )
return key
def snake_case_ (__A : Optional[Any] , __A : Dict , __A : List[Any] , __A : List[Any] ) -> Any:
__lowerCAmelCase : Dict = {}
import re
__lowerCAmelCase : int = re.compile(r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" )
__lowerCAmelCase : Union[str, Any] = re.compile(
r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
__lowerCAmelCase : Tuple = re.compile(r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" )
__lowerCAmelCase : Union[str, Any] = re.compile(r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" )
__lowerCAmelCase : List[Any] = re.compile(
r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
__lowerCAmelCase : int = re.compile(r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" )
__lowerCAmelCase : Union[str, Any] = re.compile(r"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)""" )
__lowerCAmelCase : int = re.compile(
r"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
__lowerCAmelCase : List[str] = re.compile(r"""conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)""" )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(__A ):
__lowerCAmelCase : Tuple = re_encoder_block_conv_in.match(__A )
__lowerCAmelCase : int = regex_match.groups()
__lowerCAmelCase : Union[str, Any] = int(groups[2] ) * 2 + int(groups[3] )
__lowerCAmelCase : Any = f'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'''
__lowerCAmelCase : Optional[int] = re_encoder_block_conv_in.sub(__A , __A )
elif re_encoder_block_resnet.fullmatch(__A ):
__lowerCAmelCase : List[Any] = re_encoder_block_resnet.match(__A )
__lowerCAmelCase : Optional[Any] = regex_match.groups()
__lowerCAmelCase : List[Any] = int(groups[2] ) * 2 + int(groups[3] )
__lowerCAmelCase : Any = {"""1""": 1, """3""": 2}[groups[-2]]
__lowerCAmelCase : Optional[int] = f'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'''
__lowerCAmelCase : Optional[Any] = f'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
__lowerCAmelCase : Dict = prefix + resnet_block
__lowerCAmelCase : List[Any] = re_encoder_block_resnet.sub(__A , __A )
elif re_encoder_block_proj_out.fullmatch(__A ):
__lowerCAmelCase : Tuple = re_encoder_block_proj_out.match(__A )
__lowerCAmelCase : Optional[Any] = regex_match.groups()
__lowerCAmelCase : int = f'''encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'''
__lowerCAmelCase : str = re_encoder_block_proj_out.sub(__A , __A )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(__A ):
__lowerCAmelCase : Any = re_decoder_block_conv_out.match(__A )
__lowerCAmelCase : int = regex_match.groups()
__lowerCAmelCase : Tuple = int(groups[2] ) * 2 + int(groups[3] ) - 2
__lowerCAmelCase : Optional[Any] = f'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'''
__lowerCAmelCase : Union[str, Any] = re_decoder_block_conv_out.sub(__A , __A )
elif re_decoder_block_resnet.fullmatch(__A ):
__lowerCAmelCase : int = re_decoder_block_resnet.match(__A )
__lowerCAmelCase : Union[str, Any] = regex_match.groups()
__lowerCAmelCase : List[Any] = int(groups[2] ) * 2 + int(groups[3] ) - 2
__lowerCAmelCase : Dict = {"""1""": 1, """3""": 2}[groups[-2]]
__lowerCAmelCase : List[str] = f'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'''
__lowerCAmelCase : Tuple = f'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
__lowerCAmelCase : Optional[int] = prefix + resnet_block
__lowerCAmelCase : Union[str, Any] = re_decoder_block_resnet.sub(__A , __A )
elif re_decoder_block_proj_in.fullmatch(__A ):
__lowerCAmelCase : Tuple = re_decoder_block_proj_in.match(__A )
__lowerCAmelCase : int = regex_match.groups()
__lowerCAmelCase : Any = f'''decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'''
__lowerCAmelCase : List[Any] = re_decoder_block_proj_in.sub(__A , __A )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(__A ):
__lowerCAmelCase : str = re_prior_cond_conv_out.match(__A )
__lowerCAmelCase : str = regex_match.groups()
__lowerCAmelCase : Union[str, Any] = int(groups[1] ) * 2 + int(groups[2] ) - 2
__lowerCAmelCase : List[str] = f'''conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'''
__lowerCAmelCase : int = re_prior_cond_conv_out.sub(__A , __A )
elif re_prior_cond_resnet.fullmatch(__A ):
__lowerCAmelCase : Optional[int] = re_prior_cond_resnet.match(__A )
__lowerCAmelCase : Tuple = regex_match.groups()
__lowerCAmelCase : List[Any] = int(groups[1] ) * 2 + int(groups[2] ) - 2
__lowerCAmelCase : int = {"""1""": 1, """3""": 2}[groups[-2]]
__lowerCAmelCase : List[Any] = f'''conditioner_blocks.upsampler.upsample_block.{block_index}.'''
__lowerCAmelCase : str = f'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
__lowerCAmelCase : Optional[Any] = prefix + resnet_block
__lowerCAmelCase : str = re_prior_cond_resnet.sub(__A , __A )
elif re_prior_cond_proj_in.fullmatch(__A ):
__lowerCAmelCase : Dict = re_prior_cond_proj_in.match(__A )
__lowerCAmelCase : Tuple = regex_match.groups()
__lowerCAmelCase : List[str] = f'''conditioner_blocks.upsampler.proj_in.{groups[-1]}'''
__lowerCAmelCase : Optional[int] = re_prior_cond_proj_in.sub(__A , __A )
# keep original key
else:
__lowerCAmelCase : List[Any] = original_key
__lowerCAmelCase : Optional[int] = replace_key(__A )
if f'''{key_prefix}.{key}''' not in model_state_dict or key is None:
print(f'''failed converting {original_key} to {key}, does not match''' )
# handle missmatched shape
elif value.shape != model_state_dict[f'''{key_prefix}.{key}'''].shape:
__lowerCAmelCase : int = model_state_dict[f'''{key_prefix}.{key}''']
print(f'''{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match''' )
__lowerCAmelCase : Optional[int] = original_key
__lowerCAmelCase : Tuple = original_key
__lowerCAmelCase : str = value
return new_dict
@torch.no_grad()
def snake_case_ (__A : str=None , __A : List[str]=None ) -> List[Any]:
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f'''{pytorch_dump_folder_path}/{file.split('/' )[-1]}''' ):
__lowerCAmelCase : Union[str, Any] = requests.get(f'''{PREFIX}{file}''' , allow_redirects=__A )
os.makedirs(f'''{pytorch_dump_folder_path}/''' , exist_ok=__A )
open(f'''{pytorch_dump_folder_path}/{file.split('/' )[-1]}''' , """wb""" ).write(r.content )
__lowerCAmelCase : Union[str, Any] = MODEL_MAPPING[model_name.split("""/""" )[-1]]
__lowerCAmelCase : List[Any] = JukeboxConfig.from_pretrained(__A )
__lowerCAmelCase : str = JukeboxModel(__A )
__lowerCAmelCase : Optional[int] = []
__lowerCAmelCase : Dict = {}
for i, dict_name in enumerate(__A ):
__lowerCAmelCase : int = torch.load(f'''{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}''' )["""model"""]
__lowerCAmelCase : List[Any] = {}
for k in old_dic.keys():
if k.endswith(""".b""" ):
__lowerCAmelCase : Optional[int] = old_dic[k]
elif k.endswith(""".w""" ):
__lowerCAmelCase : Optional[int] = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
__lowerCAmelCase : Dict = old_dic[k]
else:
__lowerCAmelCase : Tuple = old_dic[k]
__lowerCAmelCase : Any = """vqvae""" if i == 0 else f'''priors.{3 - i}'''
__lowerCAmelCase : Any = fix_jukebox_keys(__A , model.state_dict() , __A , __A )
weight_dict.append(__A )
__lowerCAmelCase : Optional[Any] = weight_dict.pop(0 )
model.vqvae.load_state_dict(__A )
for i in range(len(__A ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(__A ).mkdir(exist_ok=__A )
with open(f'''{pytorch_dump_folder_path}/mapping.json''' , """w""" ) as txtfile:
json.dump(__A , __A )
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__A )
return weight_dict
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
__UpperCAmelCase = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
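# Illustrative trace of one rename above (a sketch; the indices are chosen
# arbitrarily and need not exist in a real checkpoint):
# "encoders.0.level_blocks.0.model.1.3.weight" matches re_encoder_block_conv_in
# with groups ("0", "0", "1", "3", "weight"), so block_index = 1*2 + 3 = 5 and
# the key becomes "encoders.0.level_blocks.0.downsample_block.5.weight".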
| 139 |
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int) -> None:
    """Write the first `n` lines of each file in `src_dir` to `dest_dir`."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))
if __name__ == "__main__":
fire.Fire(minify)
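# Usage sketch (paths are placeholders): keep only the first 100 lines of every
# file in ./data and write the truncated copies to ./data_mini:
#
#   python this_script.py ./data ./data_mini 100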
| 139 | 1 |
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
__lowercase = logging.get_logger(__name__)
# General docstring
__lowercase = """ResNetConfig"""
# Base docstring
__lowercase = """microsoft/resnet-50"""
__lowercase = [1, 2048, 7, 7]
# Image classification docstring
__lowercase = """microsoft/resnet-50"""
__lowercase = """tiger cat"""
__lowercase = [
"""microsoft/resnet-50""",
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class _A ( nn.Module ):
"""simple docstring"""
def __init__( self : List[str] , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : int = 3 , __UpperCAmelCase : int = 1 , __UpperCAmelCase : str = "relu"):
super().__init__()
a : List[Any] = nn.Convad(
__UpperCAmelCase , __UpperCAmelCase , kernel_size=__UpperCAmelCase , stride=__UpperCAmelCase , padding=kernel_size // 2 , bias=__UpperCAmelCase)
a : str = nn.BatchNormad(__UpperCAmelCase)
a : Optional[int] = ACTaFN[activation] if activation is not None else nn.Identity()
def __snake_case ( self : Optional[Any] , __UpperCAmelCase : Tensor):
a : Union[str, Any] = self.convolution(__UpperCAmelCase)
a : Optional[Any] = self.normalization(__UpperCAmelCase)
a : Optional[int] = self.activation(__UpperCAmelCase)
return hidden_state
class _A ( nn.Module ):
"""simple docstring"""
def __init__( self : Optional[int] , __UpperCAmelCase : ResNetConfig):
super().__init__()
a : str = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act)
a : List[str] = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1)
a : str = config.num_channels
def __snake_case ( self : str , __UpperCAmelCase : Tensor):
a : Optional[int] = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration.")
a : Optional[int] = self.embedder(__UpperCAmelCase)
a : Optional[int] = self.pooler(__UpperCAmelCase)
return embedding
class _A ( nn.Module ):
"""simple docstring"""
def __init__( self : Any , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : int = 2):
super().__init__()
a : Union[str, Any] = nn.Convad(__UpperCAmelCase , __UpperCAmelCase , kernel_size=1 , stride=__UpperCAmelCase , bias=__UpperCAmelCase)
a : int = nn.BatchNormad(__UpperCAmelCase)
def __snake_case ( self : Any , __UpperCAmelCase : Tensor):
a : Any = self.convolution(__UpperCAmelCase)
a : Any = self.normalization(__UpperCAmelCase)
return hidden_state
class _A ( nn.Module ):
"""simple docstring"""
def __init__( self : Union[str, Any] , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : int = 1 , __UpperCAmelCase : str = "relu"):
super().__init__()
a : Optional[int] = in_channels != out_channels or stride != 1
a : List[str] = (
ResNetShortCut(__UpperCAmelCase , __UpperCAmelCase , stride=__UpperCAmelCase) if should_apply_shortcut else nn.Identity()
)
a : str = nn.Sequential(
ResNetConvLayer(__UpperCAmelCase , __UpperCAmelCase , stride=__UpperCAmelCase) , ResNetConvLayer(__UpperCAmelCase , __UpperCAmelCase , activation=__UpperCAmelCase) , )
a : Optional[int] = ACTaFN[activation]
def __snake_case ( self : str , __UpperCAmelCase : Any):
a : Optional[int] = hidden_state
a : Optional[int] = self.layer(__UpperCAmelCase)
a : Optional[Any] = self.shortcut(__UpperCAmelCase)
hidden_state += residual
a : str = self.activation(__UpperCAmelCase)
return hidden_state
class _A ( nn.Module ):
"""simple docstring"""
def __init__( self : Tuple , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : int = 1 , __UpperCAmelCase : str = "relu" , __UpperCAmelCase : int = 4):
super().__init__()
a : Optional[Any] = in_channels != out_channels or stride != 1
a : List[str] = out_channels // reduction
a : Optional[Any] = (
ResNetShortCut(__UpperCAmelCase , __UpperCAmelCase , stride=__UpperCAmelCase) if should_apply_shortcut else nn.Identity()
)
a : int = nn.Sequential(
ResNetConvLayer(__UpperCAmelCase , __UpperCAmelCase , kernel_size=1) , ResNetConvLayer(__UpperCAmelCase , __UpperCAmelCase , stride=__UpperCAmelCase) , ResNetConvLayer(__UpperCAmelCase , __UpperCAmelCase , kernel_size=1 , activation=__UpperCAmelCase) , )
a : Union[str, Any] = ACTaFN[activation]
def __snake_case ( self : List[str] , __UpperCAmelCase : Dict):
a : List[str] = hidden_state
a : List[Any] = self.layer(__UpperCAmelCase)
a : Optional[int] = self.shortcut(__UpperCAmelCase)
hidden_state += residual
a : str = self.activation(__UpperCAmelCase)
return hidden_state
class _A ( nn.Module ):
"""simple docstring"""
def __init__( self : List[str] , __UpperCAmelCase : ResNetConfig , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : int = 2 , __UpperCAmelCase : int = 2 , ):
super().__init__()
a : Tuple = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
a : Dict = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(__UpperCAmelCase , __UpperCAmelCase , stride=__UpperCAmelCase , activation=config.hidden_act) , *[layer(__UpperCAmelCase , __UpperCAmelCase , activation=config.hidden_act) for _ in range(depth - 1)] , )
def __snake_case ( self : List[str] , __UpperCAmelCase : Tensor):
a : str = input
for layer in self.layers:
a : Tuple = layer(__UpperCAmelCase)
return hidden_state
class _A ( nn.Module ):
"""simple docstring"""
def __init__( self : Union[str, Any] , __UpperCAmelCase : ResNetConfig):
super().__init__()
a : List[str] = nn.ModuleList([])
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
__UpperCAmelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ))
a : Any = zip(config.hidden_sizes , config.hidden_sizes[1:])
for (in_channels, out_channels), depth in zip(__UpperCAmelCase , config.depths[1:]):
self.stages.append(ResNetStage(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , depth=__UpperCAmelCase))
def __snake_case ( self : int , __UpperCAmelCase : Tensor , __UpperCAmelCase : bool = False , __UpperCAmelCase : bool = True):
a : Tuple = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
a : Any = hidden_states + (hidden_state,)
a : int = stage_module(__UpperCAmelCase)
if output_hidden_states:
a : List[Any] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None)
return BaseModelOutputWithNoAttention(
last_hidden_state=__UpperCAmelCase , hidden_states=__UpperCAmelCase , )
class _A ( _a ):
"""simple docstring"""
UpperCAmelCase : int = ResNetConfig
UpperCAmelCase : List[Any] = """resnet"""
UpperCAmelCase : Optional[Any] = """pixel_values"""
UpperCAmelCase : str = True
def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : Tuple):
if isinstance(__UpperCAmelCase , nn.Convad):
nn.init.kaiming_normal_(module.weight , mode="fan_out" , nonlinearity="relu")
elif isinstance(__UpperCAmelCase , (nn.BatchNormad, nn.GroupNorm)):
nn.init.constant_(module.weight , 1)
nn.init.constant_(module.bias , 0)
def __snake_case ( self : List[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Any=False):
if isinstance(__UpperCAmelCase , __UpperCAmelCase):
a : Tuple = value
__lowercase = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
__lowercase = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"""The bare ResNet model outputting raw features without any specific head on top.""" ,_a ,)
class _A ( _a ):
"""simple docstring"""
def __init__( self : List[str] , __UpperCAmelCase : Union[str, Any]):
super().__init__(__UpperCAmelCase)
a : Tuple = config
a : str = ResNetEmbeddings(__UpperCAmelCase)
a : List[str] = ResNetEncoder(__UpperCAmelCase)
a : Dict = nn.AdaptiveAvgPoolad((1, 1))
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__UpperCAmelCase)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__UpperCAmelCase , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __snake_case ( self : List[str] , __UpperCAmelCase : Tensor , __UpperCAmelCase : Optional[bool] = None , __UpperCAmelCase : Optional[bool] = None):
a : str = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
a : List[str] = return_dict if return_dict is not None else self.config.use_return_dict
a : Any = self.embedder(__UpperCAmelCase)
a : Optional[int] = self.encoder(
__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , return_dict=__UpperCAmelCase)
a : Tuple = encoder_outputs[0]
a : Dict = self.pooler(__UpperCAmelCase)
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__UpperCAmelCase , pooler_output=__UpperCAmelCase , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"""
ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
""" ,_a ,)
class _A ( _a ):
"""simple docstring"""
def __init__( self : List[Any] , __UpperCAmelCase : List[str]):
super().__init__(__UpperCAmelCase)
a : List[str] = config.num_labels
a : Union[str, Any] = ResNetModel(__UpperCAmelCase)
# classification head
a : Tuple = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__UpperCAmelCase)
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__UpperCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : Optional[torch.FloatTensor] = None , __UpperCAmelCase : Optional[torch.LongTensor] = None , __UpperCAmelCase : Optional[bool] = None , __UpperCAmelCase : Optional[bool] = None , ):
a : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
a : int = self.resnet(__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , return_dict=__UpperCAmelCase)
a : Dict = outputs.pooler_output if return_dict else outputs[1]
a : List[str] = self.classifier(__UpperCAmelCase)
a : Tuple = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
a : Dict = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
a : int = "single_label_classification"
else:
a : Tuple = "multi_label_classification"
if self.config.problem_type == "regression":
a : Dict = MSELoss()
if self.num_labels == 1:
a : Any = loss_fct(logits.squeeze() , labels.squeeze())
else:
a : Any = loss_fct(__UpperCAmelCase , __UpperCAmelCase)
elif self.config.problem_type == "single_label_classification":
a : Any = CrossEntropyLoss()
a : Optional[int] = loss_fct(logits.view(-1 , self.num_labels) , labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
a : Optional[Any] = BCEWithLogitsLoss()
a : Optional[int] = loss_fct(__UpperCAmelCase , __UpperCAmelCase)
if not return_dict:
a : int = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=__UpperCAmelCase , logits=__UpperCAmelCase , hidden_states=outputs.hidden_states)
@add_start_docstrings(
"""
ResNet backbone, to be used with frameworks like DETR and MaskFormer.
""" ,_a ,)
class _A ( _a ,_a ):
"""simple docstring"""
def __init__( self : Union[str, Any] , __UpperCAmelCase : Dict):
super().__init__(__UpperCAmelCase)
super()._init_backbone(__UpperCAmelCase)
a : List[str] = [config.embedding_size] + config.hidden_sizes
a : Tuple = ResNetEmbeddings(__UpperCAmelCase)
a : List[str] = ResNetEncoder(__UpperCAmelCase)
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__UpperCAmelCase)
@replace_return_docstrings(output_type=__UpperCAmelCase , config_class=_CONFIG_FOR_DOC)
def __snake_case ( self : Dict , __UpperCAmelCase : Tensor , __UpperCAmelCase : Optional[bool] = None , __UpperCAmelCase : Optional[bool] = None):
a : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
a : List[str] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
a : str = self.embedder(__UpperCAmelCase)
a : Union[str, Any] = self.encoder(__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , return_dict=__UpperCAmelCase)
a : Optional[int] = outputs.hidden_states
a : Union[str, Any] = ()
for idx, stage in enumerate(self.stage_names):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
a : str = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=__UpperCAmelCase , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=__UpperCAmelCase , )
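# Minimal usage sketch (the checkpoint id and expected shape come from the
# docstring constants above; `image` stands for any PIL image):
#
#   from transformers import AutoImageProcessor
#   processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#   model = ResNetModel.from_pretrained("microsoft/resnet-50")
#   inputs = processor(images=image, return_tensors="pt")
#   outputs = model(**inputs)
#   outputs.last_hidden_state.shape  # torch.Size([1, 2048, 7, 7])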
| 40 |
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
lowerCamelCase : Tuple =_symbol_database.Default()
lowerCamelCase : List[str] =_descriptor_pool.Default().AddSerializedFile(
b'''\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18  \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'''
)
lowerCamelCase : str =globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, '''sentencepiece_model_pb2''', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
lowerCamelCase : Optional[int] =None
lowerCamelCase : Tuple =b'''H\003'''
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
lowerCamelCase : List[str] =45
lowerCamelCase : List[Any] =1581
lowerCamelCase : Optional[int] =1517
lowerCamelCase : Tuple =1570
lowerCamelCase : Dict =1584
lowerCamelCase : Optional[Any] =1793
lowerCamelCase : Dict =1795
lowerCamelCase : Any =1916
lowerCamelCase : Dict =1864
lowerCamelCase : Dict =1905
lowerCamelCase : Dict =1919
lowerCamelCase : Union[str, Any] =2429
lowerCamelCase : List[Any] =2208
lowerCamelCase : List[Any] =2418
lowerCamelCase : List[str] =2323
lowerCamelCase : Dict =2407
# @@protoc_insertion_point(module_scope)
| 189 | 0 |
import argparse
import os
import re
import packaging.version
lowerCamelCase : Optional[Any] ='''examples/'''
lowerCamelCase : List[Any] ={
'''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''),
'''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
lowerCamelCase : List[str] ={
'''init''': '''src/transformers/__init__.py''',
'''setup''': '''setup.py''',
}
lowerCamelCase : int ='''README.md'''
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Optional[Any]:
with open(__lowerCAmelCase , "r" , encoding="utf-8" , newline="\n" ) as f:
UpperCamelCase__ : List[Any] = f.read()
UpperCamelCase__ , UpperCamelCase__ : List[str] = REPLACE_PATTERNS[pattern]
UpperCamelCase__ : Union[str, Any] = replace.replace("VERSION" , __lowerCAmelCase )
UpperCamelCase__ : Tuple = re_pattern.sub(__lowerCAmelCase , __lowerCAmelCase )
with open(__lowerCAmelCase , "w" , encoding="utf-8" , newline="\n" ) as f:
f.write(__lowerCAmelCase )
def update_version_in_examples(version):
    """Update the version in all the example scripts."""
    # PATH_TO_EXAMPLES is assumed to be defined above as the path to the examples directory.
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Reads the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
| 196 |
def factorial(num: int) -> int:
    """Return num! (the factorial of num)."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Split the digits of `number` and add them together."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    """Returns the sum of the digits in the number num! (Project Euler problem 20)."""
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result
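

def _sanity_check_solution():
    """Illustration only: two small checks for `solution` above. 10! = 3628800,
    whose digits sum to 27; 648 is the published Project Euler 20 answer."""
    assert solution(10) == 27
    assert solution(100) == 648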
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip()))) | 196 | 1 |
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

require_version("pytorch_lightning>=1.0.4")

MODEL_MODES = {
    "base": AutoModel,
    "sequence-classification": AutoModelForSequenceClassification,
    "question-answering": AutoModelForQuestionAnswering,
    "pretraining": AutoModelForPreTraining,
    "token-classification": AutoModelForTokenClassification,
    "language-modeling": AutoModelWithLMHead,
    "summarization": AutoModelForSeq2SeqLM,
    "translation": AutoModelForSeq2SeqLM,
}


# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    # '': get_constant_schedule,             # not supported for now
    # '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class BaseTransformer(pl.LightningModule):
    def __init__(
        self,
        hparams: argparse.Namespace,
        num_labels=None,
        mode="base",
        config=None,
        tokenizer=None,
        model=None,
        **config_kwargs,
    ):
        """Initialize a model, tokenizer and config."""
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading

        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
                **({"num_labels": num_labels} if num_labels is not None else {}),
                cache_dir=cache_dir,
                **config_kwargs,
            )
        else:
            self.config: PretrainedConfig = config

        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))

        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
                cache_dir=cache_dir,
            )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path,
                from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
                config=self.config,
                cache_dir=cache_dir,
            )
        else:
            self.model = model
    def load_hf_checkpoint(self, *args, **kwargs):
        self.model = self.model_type.from_pretrained(*args, **kwargs)

    def get_lr_scheduler(self):
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps()
        )
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler

    def configure_optimizers(self):
        """Prepare optimizer and schedule (linear warmup and decay)."""
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check this named paramters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False
            )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon
            )
        self.opt = optimizer

        scheduler = self.get_lr_scheduler()

        return [optimizer], [scheduler]

    def test_step(self, batch, batch_nb):
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        return self.validation_end(outputs)

    def total_steps(self) -> int:
        """The number of total training steps that will be run. Used for lr scheduler purposes."""
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs

    def setup(self, stage):
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False):
        raise NotImplementedError("You must implement this for your task")

    def train_dataloader(self):
        return self.train_loader

    def val_dataloader(self):
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)

    def _feature_file(self, mode):
        return os.path.join(
            self.hparams.data_dir,
            "cached_{}_{}_{}".format(
                mode,
                list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
                str(self.hparams.max_seq_length),
            ),
        )

    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        parser.add_argument(
            "--model_name_or_path",
            default=None,
            type=str,
            required=True,
            help="Path to pretrained model or model identifier from huggingface.co/models",
        )
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
        )
        parser.add_argument(
            "--tokenizer_name",
            default=None,
            type=str,
            help="Pretrained tokenizer name or path if not the same as model_name",
        )
        parser.add_argument(
            "--cache_dir",
            default=str(Path(root_dir).parent / "test_run" / "cache"),
            type=str,
            help="Where do you want to store the pre-trained models downloaded from huggingface.co",
        )
        parser.add_argument(
            "--encoder_layerdrop",
            type=float,
            help="Encoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--decoder_layerdrop",
            type=float,
            help="Decoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--dropout",
            type=float,
            help="Dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--attention_dropout",
            type=float,
            help="Attention dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument(
            "--lr_scheduler",
            default="linear",
            choices=arg_to_scheduler_choices,
            metavar=arg_to_scheduler_metavar,
            type=str,
            help="Learning rate scheduler",
        )
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)
        parser.add_argument("--adafactor", action="store_true")
class InitCallback(pl.Callback):
    def on_sanity_check_start(self, trainer, pl_module):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.


class CheckParamCallback(pl.Callback):
    # check whether newly added model parameters are differentiable
    def on_after_backward(self, trainer, pl_module):
        # print(pl_module.model.rag)
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)


class LoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer, pl_module):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer, pl_module):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
    #  To allow all pl args uncomment the following line
    #  parser = pl.Trainer.add_argparse_args(parser)
    parser.add_argument(
        "--output_dir",
        default=str(Path(root_dir).parent / "test_run" / "model_checkpoints"),
        type=str,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O2",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps",
        dest="accumulate_grad_batches",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir",
        default=str(Path(root_dir).parent / "test_run" / "dummy-train-data"),
        type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )


def generic_train(
    model: BaseTransformer,
    args: argparse.Namespace,
    early_stopping_callback=None,
    logger=True,  # can pass WandbLogger() here
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}

    if args.fp16:
        train_params["precision"] = 16

    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"

    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)

    else:
        print("RAG modeling tests with new set functions successfuly executed!")
    return trainer
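

# Illustration only: a minimal sketch of how a task-specific module is expected to plug
# into BaseTransformer/generic_train above. `MyDataset` and the loss computation are
# hypothetical; a real task must at least implement get_dataloader (see the
# NotImplementedError above) and a training_step.
#
#     class MyTaskModule(BaseTransformer):
#         def training_step(self, batch, batch_idx):
#             loss = self.model(**batch).loss
#             return {"loss": loss}
#
#         def get_dataloader(self, type_path, batch_size, shuffle=False):
#             return DataLoader(MyDataset(type_path), batch_size=batch_size, shuffle=shuffle)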
| 174 |
def check_cycle(graph: dict) -> bool:
    """Returns True if the given directed graph contains a cycle."""
    # Keep track of all visited nodes
    visited = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    """Recursive DFS that reports a cycle when an edge reaches a vertex already on the stack."""
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True

    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False


if __name__ == "__main__":
    from doctest import testmod

    testmod()
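

def _demo_check_cycle():
    """Illustration only: a tiny directed graph with a back edge 3 -> 1, and an acyclic one."""
    cyclic = {0: [1], 1: [2], 2: [3], 3: [1]}
    acyclic = {0: [1, 2], 1: [2], 2: [3], 3: []}
    assert check_cycle(cyclic) is True
    assert check_cycle(acyclic) is False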
| 174 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_encodec''': [
'''ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EncodecConfig''',
],
'''feature_extraction_encodec''': ['''EncodecFeatureExtractor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
'''ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EncodecModel''',
'''EncodecPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
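
# Illustration only (hedged sketch, not part of this file): with the _LazyModule pattern
# above, importing the package stays cheap; the torch-backed modeling submodule is only
# imported on first attribute access:
#
#     from transformers.models.encodec import EncodecConfig   # cheap: config only
#     from transformers.models.encodec import EncodecModel    # triggers the torch import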
| 279 |
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    """Hash map with open addressing (linear probing) and automatic resizing."""

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        """Try to add the value to the bucket; returns False if another key occupies it."""
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()

        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
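

def _demo_hash_map():
    """Illustration only: basic set/get/delete behaviour of the HashMap above."""
    hm = HashMap(initial_block_size=4)
    hm["a"] = 1
    hm["b"] = 2
    assert hm["a"] == 1 and len(hm) == 2
    del hm["a"]
    assert "a" not in hm and hm["b"] == 2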
| 279 | 1 |
def interpolation_search(sorted_collection, item):
    """Searches `item` in an ascending `sorted_collection` and returns its index, or None."""
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None


def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Recursive variant; `left` and `right` bound the slice currently searched."""
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, point)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection, item, left, point - 1
            )
        else:
            return interpolation_search_by_recursion(
                sorted_collection, item, point + 1, right
            )


def __assert_sorted(collection):
    """Raises ValueError if `collection` is not ascending sorted; returns True otherwise."""
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True


if __name__ == "__main__":
    import sys

    debug = 0
    if debug == 1:
        collection = [10, 30, 40, 45, 50, 66, 77, 93]
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("Sequence must be ascending sorted to apply interpolation search")
        target = 67
        result = interpolation_search(collection, target)
        if result is not None:
            print(f"{target} found at positions: {result}")
        else:
            print("Not found")
| 7 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim,
        depths=depths,
        num_heads=num_heads,
        window_size=window_size,
        out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config


def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('backbone.patch_embed.projection.weight', 'backbone.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.patch_embed.projection.bias', 'backbone.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.patch_embed.norm.weight', 'backbone.embeddings.norm.weight') )
rename_keys.append(('backbone.patch_embed.norm.bias', 'backbone.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm1.weight', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm1.bias', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight', f'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias', f'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm2.weight', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm2.bias', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight', f'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias', f'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight', f'backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias', f'backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((f'backbone.stages.{i}.downsample.reduction.weight', f'backbone.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((f'backbone.stages.{i}.downsample.norm.weight', f'backbone.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((f'backbone.stages.{i}.downsample.norm.bias', f'backbone.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((f'backbone.norm{i}.weight', f'backbone.hidden_states_norms.stage{i+1}.weight') )
rename_keys.append((f'backbone.norm{i}.bias', f'backbone.hidden_states_norms.stage{i+1}.bias') )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
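

def _demo_unfold_order_round_trip():
    """Illustration only: the reverse helper undoes the forward reorder exactly,
    so the two functions above are inverses of each other."""
    x = torch.arange(32.0).reshape(2, 16)
    assert torch.equal(reverse_correct_unfold_reduction_order(correct_unfold_reduction_order(x)), x)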
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
'upernet-swin-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth',
'upernet-swin-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth',
'upernet-swin-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth',
'upernet-swin-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth',
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]

    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]]
        )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]]
        )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-swin-tiny",
type=str,
choices=[f"""upernet-swin-{size}""" for size in ["tiny", "small", "base", "large"]],
help="Name of the Swin + UperNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 7 | 1 |
"""simple docstring"""
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer:
    """Base class from which `.generate()` streamers should inherit."""

    def put(self, value):
        """Function that is called by `.generate()` to push new tokens."""
        raise NotImplementedError()

    def end(self):
        """Function that is called by `.generate()` to signal the end of generation."""
        raise NotImplementedError()


class TextStreamer(BaseStreamer):
    """Simple streamer that prints tokens to stdout as soon as they form entire words."""

    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True

    def put(self, value):
        """Receives tokens, decodes them, and prints them to stdout as soon as they form entire words."""
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)

    def end(self):
        """Flushes any remaining cache and prints a newline to stdout."""
        # Flush the cache, if it exists
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Prints the new text to stdout. If the stream is ending, also prints a newline."""
        print(text, flush=True, end="" if not stream_end else None)

    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unicode block:
        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like the all of the other languages.
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)
            or (cp >= 0x20000 and cp <= 0x2A6DF)
            or (cp >= 0x2A700 and cp <= 0x2B73F)
            or (cp >= 0x2B740 and cp <= 0x2B81F)
            or (cp >= 0x2B820 and cp <= 0x2CEAF)
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)
        ):
            return True

        return False


class TextIteratorStreamer(TextStreamer):
    """Streamer that stores print-ready text in a queue, to be consumed by a downstream iterator."""

    def __init__(
        self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs
    ):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Put the new text in the queue. If the stream is ending, also put a stop signal in the queue."""
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
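

# Illustration only: the usual pairing of TextIteratorStreamer with `generate` running
# in a background thread. `model` and `tok` stand for an already-loaded causal LM and
# its tokenizer; this is a hedged sketch, not part of the module.
#
#     from threading import Thread
#
#     streamer = TextIteratorStreamer(tok, skip_prompt=True)
#     inputs = tok("An increasing sequence: one,", return_tensors="pt")
#     Thread(target=model.generate, kwargs={**inputs, "streamer": streamer}).start()
#     for new_text in streamer:
#         print(new_text, end="")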
| 226 |
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    """Zero-shot object detection pipeline: detects objects matching free-text candidate labels."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(
        self,
        image: Union[str, "Image.Image", List[Dict[str, Any]]],
        candidate_labels: Union[str, List[str]] = None,
        **kwargs,
    ):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")

        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
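

# Illustration only: typical use through the `pipeline` factory. The OWL-ViT checkpoint
# named below is a common choice for this task, not a requirement of the class above.
#
#     from transformers import pipeline
#
#     detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#     detector(
#         "http://images.cocodataset.org/val2017/000000039769.jpg",
#         candidate_labels=["cat", "remote control"],
#     )
#     # -> [{"score": ..., "label": "cat", "box": {"xmin": ..., "ymin": ..., ...}}, ...]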
| 226 | 1 |
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar("T")

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
| 130 |
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """
    Holds the mean and standard deviation of the CLIP embedder used in stable unCLIP, to normalize the image
    embeddings before the noise is applied and to un-normalize the noised image embeddings afterwards.
    """

    @register_to_config
    def __init__(
        self,
        embedding_dim: int = 768,
    ):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
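

def _demo_normalizer_round_trip():
    """Illustration only: unscale(scale(x)) recovers x, since the two ops are inverses."""
    normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
    embeds = torch.randn(2, 768)
    assert torch.allclose(normalizer.unscale(normalizer.scale(embeds)), embeds, atol=1e-6)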
| 130 | 1 |
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    """
    An adapter to assist with logging in multiprocess.

    `log` takes in an additional `main_process_only` kwarg, which dictates whether it should be called on all
    processes or only the main executed one. Default is `main_process_only=True`.
    """

    @staticmethod
    def _should_log(main_process_only):
        "Check if log should be performed"
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        """
        Delegates logger call after checking if we should log; `main_process_only` restricts the record to the
        main process, while `in_order` loops over every process and logs on each in turn.
        """
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)

            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name: str, log_level: str = None):
    """
    Returns a `logging.Logger` for `name` that can handle multiprocessing; the level defaults to the
    `ACCELERATE_LOG_LEVEL` environment variable when not given.
    """
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
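

# Illustration only: the intended call pattern. `get_logger` requires the accelerate
# state to exist, hence the Accelerator() (or PartialState()) call first.
#
#     from accelerate import Accelerator
#     from accelerate.logging import get_logger
#
#     accelerator = Accelerator()
#     logger = get_logger(__name__)
#     logger.info("printed once, on the main process")
#     logger.info("printed on every process", main_process_only=False)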
| 367 |
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
"facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class EncodecConfig(PretrainedConfig):
    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}'
            )

        super().__init__(**kwargs)

    # This is a property because you might want to change the chunk_length_s on the fly
    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    # This is a property because you might want to change the chunk_length_s on the fly
    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
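

def _demo_frame_rate():
    """Illustration only: with the default 24 kHz settings the hop length is
    8 * 5 * 4 * 2 = 320 samples, so frame_rate is ceil(24000 / 320) = 75 and
    num_quantizers is 1000 * 24.0 // (75 * 10) = 32."""
    config = EncodecConfig()
    assert config.frame_rate == 75
    assert config.num_quantizers == 32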
| 20 | 0 |
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
| 146 |
def solution(limit: int = 1000000) -> int:
    """Returns the sum of Euler's totient phi(n) for 2 <= n <= limit, i.e. the number of
    reduced proper fractions with denominator <= limit (Project Euler problem 72)."""
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))
if __name__ == "__main__":
print(f"{solution() = }")
| 146 | 1 |
def solution(n: int = 2000000) -> int:
    """Returns the sum of all the primes below n (Project Euler problem 10), using a
    sieve where 0 marks "still prime" and 1 marks composite."""
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1

    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
print(F'''{solution() = }''')
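

def _sanity_check_prime_sum():
    """Illustration only: the primes below 10 are 2, 3, 5 and 7, which sum to 17."""
    assert solution(10) == 17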
| 200 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"""
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50_000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache


class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 200 | 1 |
"""simple docstring"""
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    """simple docstring"""

    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        """simple docstring"""
        batch_size = 4
        num_channels = 3
        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        return {"sample": image}

    @property
    def input_shape(self):
        """simple docstring"""
        return (3, 32, 32)

    @property
    def output_shape(self):
        """simple docstring"""
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        """simple docstring"""
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        """simple docstring"""
        pass

    def test_training(self):
        """simple docstring"""
        pass

    def test_from_pretrained_hub(self):
        """simple docstring"""
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)
        model.to(torch_device)
        image = model(**self.dummy_input)
        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        """simple docstring"""
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()
        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)
        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
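# The class above follows standard unittest conventions, so it can be run
# directly (e.g. `python -m pytest path/to/this_file.py`; the path is a
# placeholder) once `diffusers` and the local test helpers are importable.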
| 108 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''uclanlp/visualbert-vqa''': '''https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-pre''': '''https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-vcr''': '''https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-pre''': '''https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-nlvr2''': '''https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-pre''': '''https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'''
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class __A( __lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = """visual_bert"""
    def __init__(self, vocab_size=3_05_22, hidden_size=7_68, visual_embedding_dim=5_12, num_hidden_layers=12, num_attention_heads=12, intermediate_size=30_72, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=5_12, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1E-12, bypass_transformer=False, special_visual_initialize=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
| 244 | 0 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests ( OnnxPipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''

    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        inputs["prompt"] = 3 * [inputs["prompt"]]
        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]
        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]
        text_inputs = pipe.tokenizer(
            prompt, padding="max_length", max_length=pipe.tokenizer.model_max_length, truncation=True, return_tensors="np")
        text_inputs = text_inputs["input_ids"]
        prompt_embeds = pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0]
        inputs["prompt_embeds"] = prompt_embeds
        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4

    def test_stable_diffusion_negative_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]
        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]
        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]
        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = pipe.tokenizer(
                p, padding="max_length", max_length=pipe.tokenizer.model_max_length, truncation=True, return_tensors="np")
            text_inputs = text_inputs["input_ids"]
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0])
        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds
        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    '''simple docstring'''

    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        # using the PNDM scheduler by default
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="onnx", safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, )
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        np.random.seed(0)
        output = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_ddim(self):
        ddim_scheduler = DDIMScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx")
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", revision="onnx", scheduler=ddim_scheduler, safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, )
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_k_lms(self):
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx")
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", revision="onnx", scheduler=lms_scheduler, safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, )
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_intermediate_state(self):
        number_of_steps = 0

        def test_callback_fn(step, timestep, latents) -> None:
            test_callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
            elif step == 5:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3

        test_callback_fn.has_been_called = False
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", revision="onnx", safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=None)
        prompt = "Andromeda galaxy in a bottle"
        generator = np.random.RandomState(0)
        pipe(
            prompt=prompt, num_inference_steps=5, guidance_scale=7.5, generator=generator, callback=test_callback_fn, callback_steps=1, )
        assert test_callback_fn.has_been_called
        assert number_of_steps == 6

    def test_stable_diffusion_no_safety_checker(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", revision="onnx", safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, )
        assert isinstance(pipe, OnnxStableDiffusionPipeline)
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = OnnxStableDiffusionPipeline.from_pretrained(tmpdirname)
        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
| 214 |
'''simple docstring'''
def print_max_activities(start, finish):
    n = len(finish)
    print('''The following activities are selected:''')
    # The first activity is always selected
    i = 0
    print(i, end=''',''')
    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=''',''')
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
__a: str = [1, 3, 0, 5, 8, 5]
__a: Optional[Any] = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
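# Worked example for the demo arrays above: activity 0 is always selected
# (finish 2); then 1 (start 3 >= finish 2), 3 (start 5 >= finish 4), and 4
# (start 8 >= finish 7), so the program prints 0,1,3,4,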
| 214 | 1 |
"""simple docstring"""
def perfect(number: int) -> bool:
    """Check whether a number equals the sum of its proper divisors, e.g. 6 = 1 + 2 + 3."""
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
if __name__ == "__main__":
print("""Program to check whether a number is a Perfect number or not...""")
lowerCamelCase_ : str = int(input("""Enter number: """).strip())
    print(F'{number} is {"" if perfect(number) else "not "}a Perfect Number.')
| 81 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb"] = ["""NllbTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb_fast"] = ["""NllbTokenizerFast"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 81 | 1 |
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
'''simple docstring'''
    def __init__(self, parent, d_model=16, batch_size=13, prediction_length=7, context_length=14, label_length=10, cardinality=19, embedding_dimension=5, num_time_features=4, is_training=True, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, lags_sequence=[1, 2, 3, 4, 5], moving_average=25, autocorrelation_factor=5, ):
        '''simple docstring'''
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
    def prepare_autoformer_inputs_dict(self, config):
        '''simple docstring'''
        _past_length = config.context_length + max(config.lags_sequence)
        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5
        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])
        inputs_dict = {
            '''past_values''': past_values,
            '''static_categorical_features''': static_categorical_features,
            '''past_time_features''': past_time_features,
            '''past_observed_mask''': past_observed_mask,
            '''future_time_features''': future_time_features,
            '''future_values''': future_values,
        }
        return inputs_dict
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        '''simple docstring'''
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)
        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)
        # only the first two values returned by create_network_inputs are needed here
        transformer_inputs, feature = model.create_network_inputs(**inputs_dict)[:2]
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])
        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]), dim=-1, )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1E-3)
        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]], device=enc_input.device, )
        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ), dim=-1, )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ), dim=-1, )
        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)
        last_hidden_state_2 = decoder(
            trend=trend_init, inputs_embeds=dec_input, encoder_hidden_states=encoder_last_hidden_state, )[0]
        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1E-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False

    def setUp(self):
        '''simple docstring'''
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)
    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        '''simple docstring'''
        config, _ = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info['''missing_keys'''], [])

    def test_encoder_decoder_model_standalone(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    @unittest.skip(reason='''Model has no tokens embeddings''')
    def test_resize_tokens_embeddings(self):
        '''simple docstring'''
        pass

    def test_model_main_input_name(self):
        '''simple docstring'''
        model_signature = inspect.signature(getattr(AutoformerModel, '''forward'''))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)

    def test_forward_signature(self):
        '''simple docstring'''
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
                '''past_values''',
                '''past_time_features''',
                '''past_observed_mask''',
                '''static_categorical_features''',
                '''static_real_features''',
                '''future_values''',
                '''future_time_features''',
            ]
            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append('''future_observed_mask''')
            expected_arg_names.extend(
                [
                    '''decoder_attention_mask''',
                    '''head_mask''',
                    '''decoder_head_mask''',
                    '''cross_attn_head_mask''',
                    '''encoder_outputs''',
                    '''past_key_values''',
                    '''output_hidden_states''',
                    '''output_attentions''',
                    '''use_cache''',
                    '''return_dict''',
                ] )
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
    def test_attention_outputs(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        seq_len = getattr(self.model_tester, '''seq_length''', None)
        decoder_seq_length = getattr(self.model_tester, '''decoder_seq_length''', seq_len)
        encoder_seq_length = getattr(self.model_tester, '''encoder_seq_length''', seq_len)
        d_model = getattr(self.model_tester, '''d_model''', None)
        num_attention_heads = getattr(self.model_tester, '''num_attention_heads''', None)
        dim = d_model // num_attention_heads
        for model_class in self.all_model_classes:
            inputs_dict['''output_attentions'''] = True
            inputs_dict['''output_hidden_states'''] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, dim], )
            out_len = len(outputs)
            correct_outlen = 7
            if "last_hidden_state" in outputs:
                correct_outlen += 1
            if "trend" in outputs:
                correct_outlen += 1
            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned
            if "loss" in outputs:
                correct_outlen += 1
            if "params" in outputs:
                correct_outlen += 1
            self.assertEqual(out_len, correct_outlen)
            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, dim], )
            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, dim], )
        # Check attention is always last and order is fine
        inputs_dict['''output_attentions'''] = True
        inputs_dict['''output_hidden_states'''] = True
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
        self.assertEqual(out_len + 2, len(outputs))
        self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
        self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
        self.assertListEqual(
            list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, dim], )

    @is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        '''simple docstring'''
        super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    """simple docstring"""
    file = hf_hub_download(repo_id='''hf-internal-testing/tourism-monthly-batch''', filename=filename, repo_type='''dataset''')
    batch = torch.load(file, map_location=torch_device)
    return batch


@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    '''simple docstring'''

    def test_inference_no_head(self):
        '''simple docstring'''
        model = AutoformerModel.from_pretrained('''huggingface/autoformer-tourism-monthly''').to(torch_device)
        batch = prepare_batch()
        with torch.no_grad():
            output = model(
                past_values=batch['''past_values'''], past_time_features=batch['''past_time_features'''], past_observed_mask=batch['''past_observed_mask'''], static_categorical_features=batch['''static_categorical_features'''], future_values=batch['''future_values'''], future_time_features=batch['''future_time_features'''], )[0]
        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device)
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        '''simple docstring'''
        model = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''').to(torch_device)
        batch = prepare_batch('''val-batch.pt''')
        with torch.no_grad():
            output = model(
                past_values=batch['''past_values'''], past_time_features=batch['''past_time_features'''], past_observed_mask=batch['''past_observed_mask'''], static_categorical_features=batch['''static_categorical_features'''], ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device)
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        '''simple docstring'''
        model = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''').to(torch_device)
        batch = prepare_batch('''val-batch.pt''')
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch['''static_categorical_features'''], past_time_features=batch['''past_time_features'''], past_values=batch['''past_values'''], future_time_features=batch['''future_time_features'''], past_observed_mask=batch['''past_observed_mask'''], )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)
        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1E-1))
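# The integration tests above are decorated with @slow and @require_torch; in a
# transformers-style test suite such tests are typically skipped unless slow
# tests are explicitly enabled (e.g. via the RUN_SLOW=1 environment variable).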
| 357 |
"""simple docstring"""
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_checkpoint_to_pytorch(tf_checkpoint_path: str, config_path: str, pytorch_dump_path: str):
    """simple docstring"""

    def get_masked_lm_array(name: str):
        full_name = f"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_array(name: str):
        full_name = f"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_layer_array(layer_index: int, name: str):
        full_name = f"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_attention_layer_array(layer_index: int, name: str, original_shape):
        full_name = f"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        array = array.reshape(original_shape)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    print(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertForMaskedLM(config)

    # Layers
    for layer_index in range(0, config.num_hidden_layers):
        layer: BertLayer = model.bert.encoder.layer[layer_index]

        # Self-attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/kernel", self_attn.query.weight.data.shape)
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/bias", self_attn.query.bias.data.shape)
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/kernel", self_attn.key.weight.data.shape)
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/bias", self_attn.key.bias.data.shape)
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/kernel", self_attn.value.weight.data.shape)
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/bias", self_attn.value.bias.data.shape)

        # Self-attention Output
        self_output: BertSelfOutput = layer.attention.output
        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/kernel", self_output.dense.weight.data.shape)
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/bias", self_output.dense.bias.data.shape)
        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/gamma")
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/beta")

        # Intermediate
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.weight.data = get_encoder_layer_array(layer_index, "_intermediate_dense/kernel")
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index, "_intermediate_dense/bias")

        # Output
        bert_output: BertOutput = layer.output
        bert_output.dense.weight.data = get_encoder_layer_array(layer_index, "_output_dense/kernel")
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index, "_output_dense/bias")
        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_output_layer_norm/gamma")
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_output_layer_norm/beta")

    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array("_position_embedding_layer/embeddings")
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array("_type_embedding_layer/embeddings")
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array("_embedding_norm_layer/gamma")
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array("_embedding_norm_layer/beta")

    # LM Head
    lm_head = model.cls.predictions.transform
    lm_head.dense.weight.data = get_masked_lm_array("dense/kernel")
    lm_head.dense.bias.data = get_masked_lm_array("dense/bias")
    lm_head.LayerNorm.weight.data = get_masked_lm_array("layer_norm/gamma")
    lm_head.LayerNorm.bias.data = get_masked_lm_array("layer_norm/beta")
    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array("embedding_table")

    # Pooling
    model.bert.pooler = BertPooler(config=config)
    model.bert.pooler.dense.weight.data = get_encoder_array("_pooler_layer/kernel")
    model.bert.pooler.dense.bias.data = get_encoder_array("_pooler_layer/bias")

    # Export final model
    model.save_pretrained(pytorch_dump_path)
    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path)
    print(new_model.eval())
    print("Model conversion was done successfully!")
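# Example invocation (all paths and the script name are placeholders):
#   python convert_checkpoint.py \
#       --tf_checkpoint_path /path/to/tf_ckpt \
#       --bert_config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output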
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
parser.add_argument(
"""--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow Token Dropping checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
type=str,
required=True,
help="""The config json file corresponding to the BERT model. This specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""",
type=str,
required=True,
help="""Path to the output PyTorch model.""",
)
SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 24 | 0 |
from __future__ import annotations
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
abc = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = """EGZWVONAHDCLFQMSIPJBYUKXTR"""
rotor2 = """FOBHMDKEXQNRAULPGSJVTYICZW"""
rotor3 = """ZJXESIUQLHAVRMDOYGTNFWPBKC"""
# reflector --------------------------
reflector = {
"""A""": """N""",
"""N""": """A""",
"""B""": """O""",
"""O""": """B""",
"""C""": """P""",
"""P""": """C""",
"""D""": """Q""",
"""Q""": """D""",
"""E""": """R""",
"""R""": """E""",
"""F""": """S""",
"""S""": """F""",
"""G""": """T""",
"""T""": """G""",
"""H""": """U""",
"""U""": """H""",
"""I""": """V""",
"""V""": """I""",
"""J""": """W""",
"""W""": """J""",
"""K""": """X""",
"""X""": """K""",
"""L""": """Y""",
"""Y""": """L""",
"""M""": """Z""",
"""Z""": """M""",
}
# -------------------------- extra rotors --------------------------
A__ = """RMDJXFUWGISLHVTCQNKYPBEZOA"""
A__ = """SGLCPQWZHKXAREONTFBVIYJUDM"""
A__ = """HVSICLTYKQUBXDWAJZOMFGPREN"""
A__ = """RZWQHFMVDBKICJLNTUXAGYPSOE"""
A__ = """LFKIJODBEGAMQPXVUHYSTCZRWN"""
A__ = """KOAEGVDHXPQZMLFTYWJNBRCIUS"""
def _validator(rotpos, rotsel, pb):
    """simple docstring"""
    if (unique_rotsel := len(set(rotsel))) < 3:
        msg = F'Please use 3 unique rotors (not {unique_rotsel})'
        raise Exception(msg)
    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        msg = F'First rotor position is not within range of 1..26 ({rotorpos1}'
        raise ValueError(msg)
    if not 0 < rotorpos2 <= len(abc):
        msg = F'Second rotor position is not within range of 1..26 ({rotorpos2})'
        raise ValueError(msg)
    if not 0 < rotorpos3 <= len(abc):
        msg = F'Third rotor position is not within range of 1..26 ({rotorpos3})'
        raise ValueError(msg)
    # Validates string and returns dict
    pbdict = _plugboard(pb)
    return rotpos, rotsel, pbdict
def _plugboard(pbstring):
    """simple docstring"""
    if not isinstance(pbstring, str):
        msg = F'Plugboard setting isn\'t type string ({type(pbstring)})'
        raise TypeError(msg)
    elif len(pbstring) % 2 != 0:
        msg = F'Odd number of symbols ({len(pbstring)})'
        raise Exception(msg)
    elif pbstring == "":
        return {}
    pbstring.replace(""" """, """""")
    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = F'\'{i}\' not in list of symbols'
            raise Exception(msg)
        elif i in tmppbl:
            msg = F'Duplicate symbol ({i})'
            raise Exception(msg)
        else:
            tmppbl.add(i)
    del tmppbl
    # Created the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]
    return pb
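# Example: with the demo plugboard used below, _plugboard("PICTURES") produces
# the symmetric mapping {'P': 'I', 'I': 'P', 'C': 'T', 'T': 'C', 'U': 'R',
# 'R': 'U', 'E': 'S', 'S': 'E'}.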
def enigma(text, rotor_position, rotor_selection=(rotor1, rotor2, rotor3), plugb="", ):
    """simple docstring"""
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(
        rotor_position, rotor_selection, plugb.upper())
    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor_a, rotor_b, rotor_c = rotor_selection
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1
    result = []
    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]
            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor_a[index % len(abc)]
            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor_b[index % len(abc)]
            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor_c[index % len(abc)]
            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]
            # 2nd rotors
            symbol = abc[rotor_c.index(symbol) - rotorpos3]
            symbol = abc[rotor_b.index(symbol) - rotorpos2]
            symbol = abc[rotor_a.index(symbol) - rotorpos1]
            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]
            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
            rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
            rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0
        # else:
        #    pass
        #    Error could be also raised
        #    raise ValueError(
        #       'Invalid symbol('+repr(symbol)+')')
        result.append(symbol)
    return "".join(result)
if __name__ == "__main__":
A__ = """This is my Python script that emulates the Enigma machine from WWII."""
A__ = (1, 1, 1)
A__ = """pictures"""
A__ = (rotora, rotora, rotora)
A__ = enigma(message, rotor_pos, rotor_sel, pb)
print("""Encrypted message:""", en)
print("""Decrypted message:""", enigma(en, rotor_pos, rotor_sel, pb))
| 82 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_mbart': ['MBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MBartConfig', 'MBartOnnxConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mbart'] = ['MBartTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mbart_fast'] = ['MBartTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mbart'] = [
'MBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'MBartForCausalLM',
'MBartForConditionalGeneration',
'MBartForQuestionAnswering',
'MBartForSequenceClassification',
'MBartModel',
'MBartPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_mbart'] = [
'TFMBartForConditionalGeneration',
'TFMBartModel',
'TFMBartPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_mbart'] = [
'FlaxMBartForConditionalGeneration',
'FlaxMBartForQuestionAnswering',
'FlaxMBartForSequenceClassification',
'FlaxMBartModel',
'FlaxMBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 182 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_bridgetower": [
"BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BridgeTowerConfig",
"BridgeTowerTextConfig",
"BridgeTowerVisionConfig",
],
"processing_bridgetower": ["BridgeTowerProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_bridgetower"] = ["BridgeTowerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bridgetower"] = [
"BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST",
"BridgeTowerForContrastiveLearning",
"BridgeTowerForImageAndTextRetrieval",
"BridgeTowerForMaskedLM",
"BridgeTowerModel",
"BridgeTowerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 208 |
from ..utils import is_flax_available, is_torch_available

if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .vq_model import VQModel

if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .vae_flax import FlaxAutoencoderKL
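# Hedged usage note: this re-export layer lets callers import public model
# classes from the package namespace (e.g. `from .models import UNet2DConditionModel`
# inside the library) instead of reaching into the individual submodules.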
| 208 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
"""configuration_audio_spectrogram_transformer""": [
"""AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ASTConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
"""AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ASTForAudioClassification""",
"""ASTModel""",
"""ASTPreTrainedModel""",
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["""ASTFeatureExtractor"""]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
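# Pattern note: _import_structure maps submodule names to their public symbols,
# and _LazyModule defers the real imports until an attribute is first accessed,
# which keeps the top-level package import cheap.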
| 58 |
'''simple docstring'''
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
"""<""": operator.lt,
"""<=""": operator.le,
"""==""": operator.eq,
"""!=""": operator.ne,
""">=""": operator.ge,
""">""": operator.gt,
}
def lowerCamelCase ( __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] ) ->Tuple:
if got_ver is None or want_ver is None:
raise ValueError(
F'Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider'
F' reinstalling {pkg}.' )
if not ops[op](version.parse(__lowerCamelCase ) , version.parse(__lowerCamelCase ) ):
raise ImportError(
F'{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}' )
def lowerCamelCase ( __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) ->None:
_SCREAMING_SNAKE_CASE = F'\n{hint}' if hint is not None else """"""
# non-versioned check
if re.match(R"""^[\w_\-\d]+$""" , __lowerCamelCase ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = requirement, None, None
else:
_SCREAMING_SNAKE_CASE = re.findall(R"""^([^!=<>\s]+)([\s!=<>]{1,2}.+)""" , __lowerCamelCase )
if not match:
raise ValueError(
"""requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"""
F' got {requirement}' )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = match[0]
_SCREAMING_SNAKE_CASE = want_full.split(""",""" ) # there could be multiple requirements
_SCREAMING_SNAKE_CASE = {}
for w in want_range:
_SCREAMING_SNAKE_CASE = re.findall(R"""^([\s!=<>]{1,2})(.+)""" , __lowerCamelCase )
if not match:
raise ValueError(
"""requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"""
F' but got {requirement}' )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = match[0]
_SCREAMING_SNAKE_CASE = want_ver
if op not in ops:
raise ValueError(F'{requirement}: need one of {list(ops.keys() )}, but got {op}' )
# special case
if pkg == "python":
_SCREAMING_SNAKE_CASE = """.""".join([str(__lowerCamelCase ) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
_compare_versions(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return
# check if any version is installed
try:
_SCREAMING_SNAKE_CASE = importlib.metadata.version(__lowerCamelCase )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
F'The \'{requirement}\' distribution was not found and is required by this application. {hint}' )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def require_version_core(requirement):
    """require_version wrapper which emits a core-specific hint on failure."""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
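
# Usage sketch (the requirement strings follow the pip format parsed above; the
# package names and versions are illustrative, not pinned by this module):
#
#   require_version("tokenizers>=0.11.1,!=0.11.3,<0.13")
#   require_version("python>=3.7")
#   require_version_core("datasets")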
| 58 | 1 |
from ...configuration_utils import PretrainedConfig


NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}


class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self, vocab_size=21128, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, max_relative_position=64, type_vocab_size=2, initializer_range=0.02,
        layer_norm_eps=1e-12, classifier_dropout=0.1, pad_token_id=0, bos_token_id=2, eos_token_id=3,
        use_cache=True, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
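
# Minimal usage sketch for the config above:
#
#   config = NezhaConfig(hidden_size=768, num_hidden_layers=12)
#   config.save_pretrained("./nezha-config")  # writes config.json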
| 109 |
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Compare array[index1] and array[index2]; swap them if they violate the order (1 = ascending, 0 = descending)."""
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """Recursively merge the bitonic sequence of `length` elements starting at `low` into the given order."""
    if length > 1:
        middle = length // 2
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """Bitonic sort; note that `length` must be a power of two for the merge network to line up."""
    if length > 1:
        middle = length // 2
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]

    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print("\nSorted array in ascending order is: ", end="")
    print(*unsorted, sep=", ")

    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print("Sorted array in descending order is: ", end="")
    print(*unsorted, sep=", ")
| 109 | 1 |
"""simple docstring"""
fast27_timesteps = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
smart27_timesteps = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
smart50_timesteps = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
smart100_timesteps = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
smart185_timesteps = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
super27_timesteps = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
super40_timesteps = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
super100_timesteps = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
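
# The lists above are precomputed denoising timestep schedules in the style used
# by the DeepFloyd IF pipelines (27/40/50/100/185-step variants). A usage sketch,
# where the `pipe` object and its kwargs are illustrative:
#
#   images = pipe(prompt_embeds=prompt_embeds, timesteps=fast27_timesteps).images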
| 44 |
"""simple docstring"""
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel

from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class IFPipelineTesterMixin:
    def _get_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, layers_per_block=1, block_out_channels=[32, 64],
            down_block_types=["ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=3, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32, attention_head_dim=8,
            addition_embed_type="text", addition_embed_type_num_heads=2, cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift", act_fn="gelu",
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02,
            thresholding=True,  # assumed value: IF schedulers enable dynamic thresholding
            dynamic_thresholding_ratio=0.95, sample_max_value=1.0, prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _get_superresolution_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, layers_per_block=[1, 2], block_out_channels=[32, 64],
            down_block_types=["ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=6, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32, attention_head_dim=8,
            addition_embed_type="text", addition_embed_type_num_heads=2, cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift", act_fn="gelu", class_embed_type="timestep",
            mid_block_scale_factor=1.414, time_embedding_act_fn="gelu", time_embedding_dim=32,
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02,
            thresholding=True,  # assumed value: IF schedulers enable dynamic thresholding
            dynamic_thresholding_ratio=0.95, sample_max_value=1.0, prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _test_save_load_optional_components(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)

        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None

        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None

        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None

        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image

        if mask_image is not None:
            inputs["mask_image"] = mask_image

        if original_image is not None:
            inputs["original_image"] = original_image

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)

        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image

        if mask_image is not None:
            inputs["mask_image"] = mask_image

        if original_image is not None:
            inputs["original_image"] = original_image

        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
    def _test_save_load_local(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
| 84 | 0 |
"""simple docstring"""
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """Explore the state space tree with DFS; each leaf at depth len(sequence) is one permutation."""
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_2: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_2)
| 202 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)
@dataclass
class InputExample:
    """A single training/test example for token classification."""

    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    """A single set of features of data; property names mirror the corresponding model inputs."""

    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"
class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError

    @staticmethod
    def convert_examples_to_features(
        examples: List[InputExample], label_list: List[str], max_seq_length: int, tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False, cls_token="[CLS]", cls_token_segment_id=1, sep_token="[SEP]", sep_token_extra=False,
        pad_on_left=False, pad_token=0, pad_token_segment_id=0, pad_token_label_id=-100, sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        label_map = {label: i for i, label in enumerate(label_list)}

        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10_000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))

            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)

                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))

            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
        return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
        # Use cross entropy ignore_index as padding label id so that only real label ids contribute to the loss later.

        def __init__(
            self, token_classification_task: TokenClassificationTask, data_dir: str, tokenizer: PreTrainedTokenizer,
            labels: List[str], model_type: str, max_seq_length: Optional[int] = None, overwrite_cache=False,
            mode: Split = Split.train,
        ):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir, "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length))
            )

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples, labels, max_seq_length, tokenizer,
                        cls_token_at_end=bool(model_type in ["xlnet"]), cls_token=tokenizer.cls_token,
                        cls_token_segment_id=2 if model_type in ["xlnet"] else 0, sep_token=tokenizer.sep_token,
                        sep_token_extra=False, pad_on_left=bool(tokenizer.padding_side == "left"),
                        pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id,
                        pad_token_label_id=self.pad_token_label_id,
                    )
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
if is_tf_available():
import tensorflow as tf
    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        pad_token_label_id: int = -100
        # Use cross entropy ignore_index as padding label id so that only real label ids contribute to the loss later.

        def __init__(
            self, token_classification_task: TokenClassificationTask, data_dir: str, tokenizer: PreTrainedTokenizer,
            labels: List[str], model_type: str, max_seq_length: Optional[int] = None, overwrite_cache=False,
            mode: Split = Split.train,
        ):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples, labels, max_seq_length, tokenizer,
                cls_token_at_end=bool(model_type in ["xlnet"]), cls_token=tokenizer.cls_token,
                cls_token_segment_id=2 if model_type in ["xlnet"] else 0, sep_token=tokenizer.sep_token,
                sep_token_extra=False, pad_on_left=bool(tokenizer.padding_side == "left"),
                pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id,
                pad_token_label_id=self.pad_token_label_id,
            )

            def gen():
                for ex in self.features:
                    if ex.token_type_ids is None:
                        yield (
                            {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                            ex.label_ids,
                        )
                    else:
                        yield (
                            {
                                "input_ids": ex.input_ids,
                                "attention_mask": ex.attention_mask,
                                "token_type_ids": ex.token_type_ids,
                            },
                            ex.label_ids,
                        )

            if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                    (
                        {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
                        tf.TensorShape([None]),
                    ),
                )
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
                    (
                        {
                            "input_ids": tf.TensorShape([None]),
                            "attention_mask": tf.TensorShape([None]),
                            "token_type_ids": tf.TensorShape([None]),
                        },
                        tf.TensorShape([None]),
                    ),
                )

        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
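
# Usage sketch: subclass TokenClassificationTask with concrete
# read_examples_from_file/get_labels implementations (e.g. an NER task), then
# build a dataset. The NER class and the paths below are illustrative:
#
#   task = NER()
#   train_dataset = TokenClassificationDataset(
#       token_classification_task=task, data_dir="./data", tokenizer=tokenizer,
#       labels=task.get_labels("./data/labels.txt"), model_type="bert",
#       max_seq_length=128, mode=Split.train,
#   )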
| 202 | 1 |
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'''},
'''merges_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'''},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''ctrl''': 2_5_6,
}
CONTROL_CODES = {
'''Pregnancy''': 1_6_8_6_2_9,
'''Christianity''': 7_6_7_5,
'''Explain''': 1_0_6_4_2_3,
'''Fitness''': 6_3_4_4_0,
'''Saving''': 6_3_1_6_3,
'''Ask''': 2_7_1_7_1,
'''Ass''': 9_5_9_8_5,
'''Joke''': 1_6_3_5_0_9,
'''Questions''': 4_5_6_2_2,
'''Thoughts''': 4_9_6_0_5,
'''Retail''': 5_2_3_4_2,
'''Feminism''': 1_6_4_3_3_8,
'''Writing''': 1_1_9_9_2,
'''Atheism''': 1_9_2_2_6_3,
'''Netflix''': 4_8_6_1_6,
'''Computing''': 3_9_6_3_9,
'''Opinion''': 4_3_2_1_3,
'''Alone''': 4_4_9_6_7,
'''Funny''': 5_8_9_1_7,
'''Gaming''': 4_0_3_5_8,
'''Human''': 4_0_8_8,
'''India''': 1_3_3_1,
'''Joker''': 7_7_1_3_8,
'''Diet''': 3_6_2_0_6,
'''Legal''': 1_1_8_5_9,
'''Norman''': 4_9_3_9,
'''Tip''': 7_2_6_8_9,
'''Weight''': 5_2_3_4_3,
'''Movies''': 4_6_2_7_3,
'''Running''': 2_3_4_2_5,
'''Science''': 2_0_9_0,
'''Horror''': 3_7_7_9_3,
'''Confession''': 6_0_5_7_2,
'''Finance''': 1_2_2_5_0,
'''Politics''': 1_6_3_6_0,
'''Scary''': 1_9_1_9_8_5,
'''Support''': 1_2_6_5_4,
'''Technologies''': 3_2_5_1_6,
'''Teenage''': 6_6_1_6_0,
'''Event''': 3_2_7_6_9,
'''Learned''': 6_7_4_6_0,
'''Notion''': 1_8_2_7_7_0,
'''Wikipedia''': 3_7_5_8_3,
'''Books''': 6_6_6_5,
'''Extract''': 7_6_0_5_0,
'''Confessions''': 1_0_2_7_0_1,
'''Conspiracy''': 7_5_9_3_2,
'''Links''': 6_3_6_7_4,
'''Narcissus''': 1_5_0_4_2_5,
'''Relationship''': 5_4_7_6_6,
'''Relationships''': 1_3_4_7_9_6,
'''Reviews''': 4_1_6_7_1,
'''News''': 4_2_5_6,
'''Translation''': 2_6_8_2_0,
'''multilingual''': 1_2_8_4_0_6,
}
def get_pairs(word):
    """Return the set of symbol pairs in a word; a word is a tuple of symbols (variable-length strings)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
class CTRLTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string."""
        split_tokens = []

        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
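
# Usage sketch (the vocab/merges paths are illustrative; the pretrained "ctrl"
# checkpoint referenced in the maps above can also be loaded via from_pretrained):
#
#   tokenizer = CTRLTokenizer(vocab_file="ctrl-vocab.json", merges_file="ctrl-merges.txt")
#   ids = tokenizer("Links Hello world")["input_ids"]
#   text = tokenizer.decode(ids)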
| 279 |
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '''\
@inproceedings{bleurt,
title={BLEURT: Learning Robust Metrics for Text Generation},
author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
booktitle={ACL},
year={2020},
url={https://arxiv.org/abs/2004.04696}
}
'''
_DESCRIPTION = '''\
BLEURT is a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)
and then employing another pre-training phase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).
See the project's README at https://github.com/google-research/bleurt#readme for more information.
'''
_KWARGS_DESCRIPTION = '''
BLEURT score.
Args:
`predictions` (list of str): prediction/candidate sentences
`references` (list of str): reference sentences
`checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.
Returns:
\'scores\': List of scores.
Examples:
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> bleurt = datasets.load_metric("bleurt")
>>> results = bleurt.compute(predictions=predictions, references=references)
>>> print([round(v, 2) for v in results["scores"]])
[1.03, 1.04]
'''
CHECKPOINT_URLS = {
'''bleurt-tiny-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip''',
'''bleurt-tiny-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip''',
'''bleurt-base-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip''',
'''bleurt-base-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip''',
'''bleurt-large-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip''',
'''bleurt-large-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip''',
'''BLEURT-20-D3''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip''',
'''BLEURT-20-D6''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip''',
'''BLEURT-20-D12''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip''',
'''BLEURT-20''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip''',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class BLEURT(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/google-research/bleurt",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/bleurt"],
            reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"],
        )

    def _download_and_prepare(self, dl_manager):
        # check that config name specifies a valid BLEURT model
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')."
            )
            self.config_name = "bleurt-base-128"

        if self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}"
            )

        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))

    def _compute(self, predictions, references):
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores}
| 279 | 1 |
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    """Queue with fixed priorities 0 (highest) to 2 (lowest); each priority level holds at most 100 items."""

    def __init__(self):
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    """Queue where the smallest element has the highest priority; holds at most 100 items."""

    def __init__(self):
        self.queue = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__(self) -> str:
        return str(self.queue)


def fixed_priority_queue():
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())


def element_priority_queue():
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 42 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"

    def __init__(
        self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>",
        eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>",
        mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token,
            eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token,
            mask_token=mask_token, additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
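
# Usage sketch (the spiece.model path is illustrative):
#
#   tokenizer = XLNetTokenizer(vocab_file="spiece.model")
#   encoded = tokenizer("Hello world")  # note: XLNet pads on the left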
| 42 | 1 |
"""simple docstring"""
from __future__ import annotations
def all_unique(input_list: list) -> bool:
    """Check whether every element of the list is distinct."""
    return len(set(input_list)) == len(input_list)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 177 |
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class T5FilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self, input_dims: int = 128, targets_length: int = 256, max_decoder_noise_time: float = 2000.0,
        d_model: int = 768, num_layers: int = 12, num_heads: int = 12, d_kv: int = 64, d_ff: int = 2048,
        dropout_rate: float = 0.1,
    ):
        super().__init__()

        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False), nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False), nn.SiLU(),
        )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)

        self.dropout = nn.Dropout(p=dropout_rate)

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = T5LayerNorm(d_model)

        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)

        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device), (batch, seq_length)
        )

        position_encodings = self.position_encoding(decoder_positions)

        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype)

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            y = lyr(
                y, conditioning_emb=conditioning_emb, encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]

        y = self.decoder_norm(y)
        y = self.post_dropout(y)

        spec_out = self.spec_out(y)
        return spec_out
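
# Usage sketch for the decoder above (all shapes and inputs are illustrative):
#
#   model = T5FilmDecoder(input_dims=128, targets_length=256, d_model=768)
#   spec_out = model(encodings_and_masks, decoder_input_tokens, decoder_noise_time)
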
class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            T5LayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )

        # cross attention: layer 1
        self.layer.append(
            T5LayerCrossAttention(
                d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon,
            )
        )

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            T5LayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )

    def forward(
        self, hidden_states, conditioning_emb=None, attention_mask=None, encoder_hidden_states=None,
        encoder_attention_mask=None, encoder_decoder_position_bias=None,
    ):
        hidden_states = self.layer[0](hidden_states, conditioning_emb=conditioning_emb, attention_mask=attention_mask)

        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype
            )

            hidden_states = self.layer[1](
                hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_extended_attention_mask
            )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)

        return (hidden_states,)
class T5LayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = T5LayerNorm(d_model)
        self.FiLMLayer = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states)

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)

        # Self-attention block
        attention_output = self.attention(normed_hidden_states)

        hidden_states = hidden_states + self.dropout(attention_output)

        return hidden_states


class T5LayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states, encoder_hidden_states=key_value_states,
            attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output


class T5LayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = T5DenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class __a ( nn.Module ):
def __init__( self : Any , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
super().__init__()
UpperCamelCase__ : Tuple = nn.Linear(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , bias=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[Any] = nn.Linear(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , bias=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[int] = nn.Linear(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , bias=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Dict = nn.Dropout(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Union[str, Any] = NewGELUActivation()
def __lowercase ( self : int , SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
UpperCamelCase__ : Optional[int] = self.act(self.wi_a(SCREAMING_SNAKE_CASE ) )
UpperCamelCase__ : Dict = self.wi_a(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[Any] = hidden_gelu * hidden_linear
UpperCamelCase__ : int = self.dropout(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = self.wo(SCREAMING_SNAKE_CASE )
return hidden_states
class TaLayerNorm(nn.Module):
    """T5-style layer norm (RMSNorm): no mean subtraction and no bias."""

    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # accumulate the variance in float32 for numerical stability
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        # convert back into half precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)
        return self.weight * hidden_states
class NewGELUActivation(nn.Module):
    """Tanh approximation of GELU, as used in Google BERT and GPT-2."""

    def forward(self, input: torch.Tensor):
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
class TaFiLMLayer(nn.Module):
    """FiLM layer: predicts a per-channel scale and shift from a conditioning embedding."""

    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x | 189 | 0 |
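# A minimal sketch of how the FiLM layer above conditions features; the shapes
# and dimensions below are illustrative assumptions, not values from this file:
#
#     film = TaFiLMLayer(in_features=16, out_features=4)
#     x = torch.randn(2, 10, 4)        # (batch, seq, d_model)
#     cond = torch.randn(2, 1, 16)     # conditioning embedding
#     out = film(x, cond)              # per-channel scale/shift, same shape as x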
'''Fenwick tree (binary indexed tree): point updates and prefix-sum queries in O(log n).'''
from copy import deepcopy
class FenwickTree:
    def __init__(self, arr=None, size=None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr) -> None:
        """Build the tree from an existing array in O(n)."""
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list[int]:
        """Recover the underlying array in O(n)."""
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        # next index in the implicit tree: add the lowest set bit
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        # previous index in the implicit tree: clear the lowest set bit
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        """Add `value` to the element at `index` in O(log n)."""
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        """Set the element at `index` to `value` in O(log n)."""
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        """Sum of the elements in [0, right) in O(log n)."""
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        """Sum of the elements in [left, right) in O(log n)."""
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        """Largest index i such that the sum of elements [0..i] is <= value,
        assuming non-negative elements; -1 if no such index exists."""
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
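# A short usage sketch (the values are illustrative, not from this file):
#
#     f = FenwickTree([1, 2, 3, 4])
#     f.prefix(3)      # 1 + 2 + 3 == 6
#     f.add(1, 10)     # underlying array becomes [1, 12, 3, 4]
#     f.query(1, 3)    # 12 + 3 == 15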
if __name__ == "__main__":
import doctest
doctest.testmod()
| 353 |
'''simple docstring'''
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
) | 270 | 0 |
'''simple docstring'''
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)
STOPPING_CRITERIA_INPUTS_DOCSTRING = R"\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax\n or scores for each vocabulary token after SoftMax.\n kwargs (`Dict[str, Any]`, *optional*):\n Additional stopping criteria specific kwargs.\n\n Return:\n `bool`. `False` indicates we should continue, `True` indicates we should stop.\n\n"
class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria that can be applied during generation."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError('StoppingCriteria needs to be subclassed')


class MaxLengthCriteria(StoppingCriteria):
    """Stops generation once the full sequence reaches `max_length` tokens."""

    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                'This is a friendly reminder - the current text generation call will exceed the model\'s predefined '
                F'maximum length ({self.max_position_embeddings}). Depending on the model, you may observe '
                'exceptions, performance degradation, or nothing at all.' )
        return is_done


class MaxNewTokensCriteria(StoppingCriteria):
    """Deprecated: stops once `start_length + max_new_tokens` tokens have been generated."""

    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            'The class `MaxNewTokensCriteria` is deprecated. '
            F'Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` '
            'with `max_length = start_length + max_new_tokens` instead.', FutureWarning, )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria):
    """Stops generation once `max_time` seconds have elapsed."""

    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None


def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    """Ensure the criteria list carries a `max_length`, appending one if it is missing."""
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn('You set different `max_length` for stopping criteria and `max_length` parameter', UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
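# A brief usage sketch (the numeric values are illustrative):
#
#     criteria = StoppingCriteriaList([
#         MaxLengthCriteria(max_length=64),
#         MaxTimeCriteria(max_time=5.0),
#     ])
#     # inside a generation loop, stop as soon as any criterion fires:
#     # if criteria(input_ids, scores): break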
| 161 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_clap": [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapAudioConfig",
"ClapConfig",
"ClapTextConfig",
],
"processing_clap": ["ClapProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapModel",
"ClapPreTrainedModel",
"ClapTextModel",
"ClapTextModelWithProjection",
"ClapAudioModel",
"ClapAudioModelWithProjection",
]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 161 | 1 |
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
    """Directed graph stored as an adjacency list of [weight, vertex] pairs."""
def __init__( self : str ):
'''simple docstring'''
__UpperCAmelCase : Dict = {}
def lowerCamelCase__ ( self : Tuple , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[Any]=1 ):
'''simple docstring'''
if self.graph.get(UpperCamelCase ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
__UpperCAmelCase : Optional[int] = [[w, v]]
if not self.graph.get(UpperCamelCase ):
__UpperCAmelCase : Union[str, Any] = []
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
return list(self.graph )
def lowerCamelCase__ ( self : Tuple , UpperCamelCase : Tuple , UpperCamelCase : Tuple ):
'''simple docstring'''
if self.graph.get(UpperCamelCase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(UpperCamelCase )
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : Optional[int]=-2 , UpperCamelCase : Union[str, Any]=-1 ):
'''simple docstring'''
if s == d:
return []
__UpperCAmelCase : Union[str, Any] = []
__UpperCAmelCase : List[str] = []
if s == -2:
__UpperCAmelCase : List[Any] = list(self.graph )[0]
stack.append(UpperCamelCase )
visited.append(UpperCamelCase )
__UpperCAmelCase : str = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
__UpperCAmelCase : Optional[Any] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(UpperCamelCase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
__UpperCAmelCase : Union[str, Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(UpperCamelCase ) != 0:
__UpperCAmelCase : Dict = stack[len(UpperCamelCase ) - 1]
else:
__UpperCAmelCase : Optional[int] = ss
            # check if we have reached the starting point
if len(UpperCamelCase ) == 0:
return visited
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : List[str]=-1 ):
'''simple docstring'''
if c == -1:
__UpperCAmelCase : Optional[Any] = floor(random() * 10_000 ) + 10
for i in range(UpperCamelCase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
__UpperCAmelCase : int = floor(random() * c ) + 1
if n != i:
self.add_pair(UpperCamelCase , UpperCamelCase , 1 )
def lowerCamelCase__ ( self : List[str] , UpperCamelCase : List[str]=-2 ):
'''simple docstring'''
__UpperCAmelCase : str = deque()
__UpperCAmelCase : Dict = []
if s == -2:
__UpperCAmelCase : List[Any] = list(self.graph )[0]
d.append(UpperCamelCase )
visited.append(UpperCamelCase )
while d:
__UpperCAmelCase : Union[str, Any] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def lowerCamelCase__ ( self : str , UpperCamelCase : int ):
'''simple docstring'''
__UpperCAmelCase : Any = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : Optional[Any] ):
'''simple docstring'''
return len(self.graph[u] )
def lowerCamelCase__ ( self : str , UpperCamelCase : int=-2 ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = []
__UpperCAmelCase : Tuple = []
if s == -2:
__UpperCAmelCase : Dict = list(self.graph )[0]
stack.append(UpperCamelCase )
visited.append(UpperCamelCase )
__UpperCAmelCase : Dict = s
__UpperCAmelCase : Any = []
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
__UpperCAmelCase : List[str] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
__UpperCAmelCase : List[str] = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(UpperCamelCase ) != 0:
__UpperCAmelCase : Optional[int] = stack[len(UpperCamelCase ) - 1]
else:
__UpperCAmelCase : Optional[int] = ss
            # check if we have reached the starting point
if len(UpperCamelCase ) == 0:
return sorted_nodes
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = []
__UpperCAmelCase : Dict = []
__UpperCAmelCase : List[str] = list(self.graph )[0]
stack.append(UpperCamelCase )
visited.append(UpperCamelCase )
__UpperCAmelCase : List[str] = -2
__UpperCAmelCase : List[Any] = []
__UpperCAmelCase : Dict = s
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : str = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
__UpperCAmelCase : Union[str, Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
__UpperCAmelCase : Union[str, Any] = len(UpperCamelCase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
__UpperCAmelCase : List[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
__UpperCAmelCase : Tuple = True
if len(UpperCamelCase ) != 0:
__UpperCAmelCase : Tuple = stack[len(UpperCamelCase ) - 1]
else:
__UpperCAmelCase : Dict = False
indirect_parents.append(UpperCamelCase )
__UpperCAmelCase : int = s
__UpperCAmelCase : int = ss
            # check if we have reached the starting point
if len(UpperCamelCase ) == 0:
return list(UpperCamelCase )
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : int = []
__UpperCAmelCase : List[Any] = []
__UpperCAmelCase : List[Any] = list(self.graph )[0]
stack.append(UpperCamelCase )
visited.append(UpperCamelCase )
__UpperCAmelCase : List[str] = -2
__UpperCAmelCase : Any = []
__UpperCAmelCase : Any = s
__UpperCAmelCase : Optional[Any] = False
__UpperCAmelCase : Optional[int] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
__UpperCAmelCase : List[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
__UpperCAmelCase : Dict = len(UpperCamelCase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
__UpperCAmelCase : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
__UpperCAmelCase : int = True
if len(UpperCamelCase ) != 0:
__UpperCAmelCase : Tuple = stack[len(UpperCamelCase ) - 1]
else:
__UpperCAmelCase : Optional[Any] = False
indirect_parents.append(UpperCamelCase )
__UpperCAmelCase : Tuple = s
__UpperCAmelCase : List[Any] = ss
            # check if we have reached the starting point
if len(UpperCamelCase ) == 0:
return False
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : List[Any]=-2 , UpperCamelCase : Tuple=-1 ):
'''simple docstring'''
__UpperCAmelCase : Dict = time()
self.dfs(UpperCamelCase , UpperCamelCase )
__UpperCAmelCase : Dict = time()
return end - begin
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : Union[str, Any]=-2 ):
'''simple docstring'''
__UpperCAmelCase : int = time()
self.bfs(UpperCamelCase )
__UpperCAmelCase : str = time()
return end - begin
class Graph:
    """Undirected weighted graph; every edge is stored in both directions."""
def __init__( self : Dict ):
'''simple docstring'''
__UpperCAmelCase : Any = {}
def lowerCamelCase__ ( self : Tuple , UpperCamelCase : int , UpperCamelCase : List[str] , UpperCamelCase : List[Any]=1 ):
'''simple docstring'''
if self.graph.get(UpperCamelCase ):
            # if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
__UpperCAmelCase : Union[str, Any] = [[w, v]]
# add the other way
if self.graph.get(UpperCamelCase ):
            # if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
            # if v does not exist
__UpperCAmelCase : Any = [[w, u]]
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : Any , UpperCamelCase : Tuple ):
'''simple docstring'''
if self.graph.get(UpperCamelCase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(UpperCamelCase )
# the other way round
if self.graph.get(UpperCamelCase ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(UpperCamelCase )
def lowerCamelCase__ ( self : int , UpperCamelCase : Union[str, Any]=-2 , UpperCamelCase : int=-1 ):
'''simple docstring'''
if s == d:
return []
__UpperCAmelCase : Optional[int] = []
__UpperCAmelCase : Optional[int] = []
if s == -2:
__UpperCAmelCase : Optional[int] = list(self.graph )[0]
stack.append(UpperCamelCase )
visited.append(UpperCamelCase )
__UpperCAmelCase : List[Any] = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
__UpperCAmelCase : List[Any] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(UpperCamelCase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
__UpperCAmelCase : List[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(UpperCamelCase ) != 0:
__UpperCAmelCase : List[str] = stack[len(UpperCamelCase ) - 1]
else:
__UpperCAmelCase : Dict = ss
            # check if we have reached the starting point
if len(UpperCamelCase ) == 0:
return visited
def lowerCamelCase__ ( self : Dict , UpperCamelCase : int=-1 ):
'''simple docstring'''
if c == -1:
__UpperCAmelCase : Dict = floor(random() * 10_000 ) + 10
for i in range(UpperCamelCase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
__UpperCAmelCase : Dict = floor(random() * c ) + 1
if n != i:
self.add_pair(UpperCamelCase , UpperCamelCase , 1 )
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : Tuple=-2 ):
'''simple docstring'''
__UpperCAmelCase : int = deque()
__UpperCAmelCase : List[Any] = []
if s == -2:
__UpperCAmelCase : int = list(self.graph )[0]
d.append(UpperCamelCase )
visited.append(UpperCamelCase )
while d:
__UpperCAmelCase : int = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def lowerCamelCase__ ( self : Tuple , UpperCamelCase : Tuple ):
'''simple docstring'''
return len(self.graph[u] )
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
__UpperCAmelCase : Dict = []
__UpperCAmelCase : int = []
__UpperCAmelCase : Dict = list(self.graph )[0]
stack.append(UpperCamelCase )
visited.append(UpperCamelCase )
__UpperCAmelCase : Union[str, Any] = -2
__UpperCAmelCase : List[str] = []
__UpperCAmelCase : Tuple = s
__UpperCAmelCase : Dict = False
__UpperCAmelCase : Optional[int] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
__UpperCAmelCase : Union[str, Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
__UpperCAmelCase : Optional[Any] = len(UpperCamelCase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
__UpperCAmelCase : Any = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
__UpperCAmelCase : Dict = True
if len(UpperCamelCase ) != 0:
__UpperCAmelCase : int = stack[len(UpperCamelCase ) - 1]
else:
__UpperCAmelCase : List[str] = False
indirect_parents.append(UpperCamelCase )
__UpperCAmelCase : Union[str, Any] = s
__UpperCAmelCase : Optional[int] = ss
            # check if we have reached the starting point
if len(UpperCamelCase ) == 0:
return list(UpperCamelCase )
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
__UpperCAmelCase : Tuple = []
__UpperCAmelCase : str = []
__UpperCAmelCase : Any = list(self.graph )[0]
stack.append(UpperCamelCase )
visited.append(UpperCamelCase )
__UpperCAmelCase : str = -2
__UpperCAmelCase : List[Any] = []
__UpperCAmelCase : Optional[Any] = s
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : Optional[int] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
__UpperCAmelCase : List[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
__UpperCAmelCase : Optional[int] = len(UpperCamelCase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
__UpperCAmelCase : Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
__UpperCAmelCase : Tuple = True
if len(UpperCamelCase ) != 0:
__UpperCAmelCase : Optional[int] = stack[len(UpperCamelCase ) - 1]
else:
__UpperCAmelCase : Any = False
indirect_parents.append(UpperCamelCase )
__UpperCAmelCase : Dict = s
__UpperCAmelCase : Dict = ss
            # check if we have reached the starting point
if len(UpperCamelCase ) == 0:
return False
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
return list(self.graph )
def lowerCamelCase__ ( self : List[str] , UpperCamelCase : Dict=-2 , UpperCamelCase : int=-1 ):
'''simple docstring'''
__UpperCAmelCase : List[str] = time()
self.dfs(UpperCamelCase , UpperCamelCase )
__UpperCAmelCase : List[str] = time()
return end - begin
def lowerCamelCase__ ( self : str , UpperCamelCase : Optional[Any]=-2 ):
'''simple docstring'''
__UpperCAmelCase : Tuple = time()
self.bfs(UpperCamelCase )
__UpperCAmelCase : Any = time()
return end - begin
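# A quick usage sketch. The method names below (add_pair, dfs, bfs, in_degree)
# are the ones these defs carry in the original source; they are assumptions
# here, since the method defs above lost their names:
#
#     g = DirectedGraph()
#     g.add_pair(0, 1)
#     g.add_pair(1, 2)
#     g.add_pair(2, 0)
#     g.dfs(0, 2)       # depth-first path from 0 to 2
#     g.bfs(0)          # breadth-first visit order
#     g.in_degree(1)    # number of incoming edges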
| 320 |
"""simple docstring"""
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate the polynomial term by term: the sum of c_i * x**i."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's rule, using only n multiplications."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)  # coefficients of x**0, x**1, x**2, ...
    x = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
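    # Both calls print 79800.0: 5 * 10**2 + 9.3 * 10**3 + 7 * 10**4 = 500 + 9300 + 70000.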
| 320 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : Any = {
'google/mobilenet_v2_1.4_224': 'https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json',
'google/mobilenet_v2_1.0_224': 'https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json',
'google/mobilenet_v2_0.75_160': 'https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json',
'google/mobilenet_v2_0.35_96': 'https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config(PretrainedConfig):
    """Configuration for a MobileNetV2 model."""

    model_type = "mobilenet_v2"

    def __init__(self, num_channels=3, image_size=224, depth_multiplier=1.0, depth_divisible_by=8, min_depth=8, expand_ratio=6, output_stride=32, first_layer_is_expansion=True, finegrained_output=True, hidden_act="relu6", tf_padding=True, classifier_dropout_prob=0.8, initializer_range=0.02, layer_norm_eps=0.001, semantic_loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self):
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self):
        return 1e-4
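# e.g. MobileNetV2Config() reproduces the google/mobilenet_v2_1.0_224 defaults,
# while MobileNetV2Config(depth_multiplier=1.4) widens every layer by 1.4x, as
# in the google/mobilenet_v2_1.4_224 checkpoint listed above.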
| 200 |
'''simple docstring'''
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = 'https://api.github.com'
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + '/user'
# https://github.com/settings/tokens
USER_TOKEN = os.environ.get('USER_TOKEN', '')


def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    """Fetch the authenticated user's GitHub profile using a personal access token."""
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F"{key}: {value}")
else:
raise ValueError('\'USER_TOKEN\' field cannot be empty.')
| 200 | 1 |
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC):
    """Abstract base class for all constraints that can be applied during generation."""

    def __init__(self):
        # test for the above condition
        self.test()

    def test(self):
        """Runs a basic sanity check on the constraint definition."""
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    'Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.' )
            stepped, completed, reset = self.update(advance)
            counter += 1
            if counter > 10_000:
                raise Exception('update() does not fulfill the constraint.' )
        if self.remaining() != 0:
            raise Exception('Custom Constraint is not defined correctly.' )

    @abstractmethod
    def advance(self):
        raise NotImplementedError(
            F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )

    @abstractmethod
    def does_advance(self, token_id: int):
        raise NotImplementedError(
            F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )

    @abstractmethod
    def update(self, token_id: int):
        raise NotImplementedError(
            F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )

    @abstractmethod
    def reset(self):
        raise NotImplementedError(
            F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )

    @abstractmethod
    def remaining(self):
        raise NotImplementedError(
            F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )

    @abstractmethod
    def copy(self, stateful=False):
        raise NotImplementedError(
            F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class PhrasalConstraint(Constraint):
    """Constraint enforcing that an exact sequence of tokens appears in the output."""

    def __init__(self, token_ids: List[int]):
        super(Constraint, self).__init__()
        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(F'''`token_ids` has to be a non-empty list, but is {token_ids}.''' )
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(F'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' )
        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}''' )
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}''' )
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed
        return new_constraint
class DisjunctiveTrie:
    """A trie over several token-id sequences, used to advance through any one of them."""

    def __init__(self, nested_token_ids: List[List[int]], no_subsets=True):
        self.max_height = max([len(one) for one in nested_token_ids])
        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]
        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                'Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'
                F''' {nested_token_ids}.''' )
        self.trie = root

    def next_tokens(self, current_seq):
        """The next possible tokens that will progress the trie, given the current sequence."""
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys())
        return next_tokens

    def reached_leaf(self, current_seq):
        next_tokens = self.next_tokens(current_seq)
        return len(next_tokens) == 0

    def count_leaves(self, root):
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        # one sequence being a subset of another makes the trie have fewer leaves than sequences
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count
class DisjunctiveConstraint(Constraint):
    """Constraint that is fulfilled once any one of several token sequences appears."""

    def __init__(self, nested_token_ids: List[List[int]]):
        super(Constraint, self).__init__()
        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(F'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' )
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(F'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' )
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids ):
            raise ValueError(
                F'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' )
        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids
        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        token_list = self.trie.next_tokens(self.current_seq)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}''' )
        next_tokens = self.trie.next_tokens(self.current_seq)
        return token_id in next_tokens

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}''' )
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()
        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed
        return new_constraint
class ConstraintListState:
    """Tracks a beam's progress through a list of constraints."""

    def __init__(self, constraints: List[Constraint]):
        self.constraints = constraints
        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False
        self.init_state()

    def init_state(self):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()
        return (len(self.complete_constraints) * self.max_seqlen) + add
    def advance(self):
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)
        if len(token_list) == 0:
            return None
        else:
            return token_list
    def reset(self, token_ids: Optional[List[int]]):
        self.init_state()
        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)
                # the entire list of constraints are fulfilled
                if self.completed:
                    break
    def add(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(F'''`token_id` should be an `int`, but is `{token_id}`.''' )
        complete, stepped = False, False
        if self.completed:
            complete = True
            stepped = False
            return complete, stepped
        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state.
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #    e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #    But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #    constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None
            if complete:
                # 2. If the next token completes the constraint, move it to the completed list and set
                #    inprogress to None. If there are no pending constraints either, then this full list of constraints
                #    is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None
                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True
        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our list
            # of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)
                    if not stepped:
                        raise Exception(
                            '`constraint.update(token_id)` is not yielding incremental progress, '
                            'even though `constraint.does_advance(token_id)` is true.' )
                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None
                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint
                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )
                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True
                        break  # prevent accidentally stepping through multiple constraints with just one token.
        return complete, stepped
    def copy(self, stateful=True):
        new_state = ConstraintListState(self.constraints)  # we never mutate the self.constraints objects
        # throughout this process, so they are still in their initialization state
        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
        return new_state
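# A brief usage sketch (the token ids are illustrative):
#
#     constraint = PhrasalConstraint([5, 9, 2])   # force this exact subsequence
#     state = ConstraintListState([constraint])
#     state.add(5)       # steps the in-progress constraint
#     state.advance()    # -> [9], the next token required to make progress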
| 55 |
'''simple docstring'''
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
    def __init__(self, parent, batch_size=2, num_channels=3, image_size=4, patch_size=2, text_seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=9_9, hidden_size=3_6, num_hidden_layers=3, num_attention_heads=4, intermediate_size=3_7, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=5_1_2, type_vocab_size=1_6, type_sequence_label_size=2, initializer_range=0.02, coordinate_size=6, shape_size=6, num_labels=3, num_choices=4, scope=None, range_bbox=1_0_0_0):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
def __UpperCAmelCase ( self : str ) -> Dict:
lowerCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
lowerCAmelCase = bbox[i, j, 3]
lowerCAmelCase = bbox[i, j, 1]
lowerCAmelCase = t
if bbox[i, j, 2] < bbox[i, j, 0]:
lowerCAmelCase = bbox[i, j, 2]
lowerCAmelCase = bbox[i, j, 0]
lowerCAmelCase = t
lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.text_seq_length] )
lowerCAmelCase = None
if self.use_token_type_ids:
lowerCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
lowerCAmelCase = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def __UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str ) -> str:
lowerCAmelCase = LayoutLMvaModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
# text + image
lowerCAmelCase = model(UpperCAmelCase__ , pixel_values=UpperCAmelCase__ )
lowerCAmelCase = model(
UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
lowerCAmelCase = model(UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
lowerCAmelCase = model(UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
lowerCAmelCase = model(UpperCAmelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
lowerCAmelCase = model(pixel_values=UpperCAmelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] ) -> Optional[int]:
lowerCAmelCase = self.num_labels
lowerCAmelCase = LayoutLMvaForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(
UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Any] ) -> Optional[Any]:
lowerCAmelCase = self.num_labels
lowerCAmelCase = LayoutLMvaForTokenClassification(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(
UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def __UpperCAmelCase ( self : Dict , UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict ) -> Optional[Any]:
lowerCAmelCase = LayoutLMvaForQuestionAnswering(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(
UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCAmelCase ( self : Tuple ) -> Any:
lowerCAmelCase = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
'input_ids': input_ids,
'bbox': bbox,
'pixel_values': pixel_values,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class LayoutLMvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False
    all_model_classes = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {'document-question-answering': LayoutLMvaForQuestionAnswering, 'feature-extraction': LayoutLMvaModel}
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
lowerCAmelCase = LayoutLMvaModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=3_7 )
def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[int]=False ) -> Optional[int]:
lowerCAmelCase = copy.deepcopy(UpperCAmelCase__ )
if model_class in get_values(UpperCAmelCase__ ):
lowerCAmelCase = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(UpperCAmelCase__ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(UpperCAmelCase__ ):
lowerCAmelCase = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__ )
elif model_class in get_values(UpperCAmelCase__ ):
lowerCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__ )
lowerCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__ )
elif model_class in [
*get_values(UpperCAmelCase__ ),
]:
lowerCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__ )
elif model_class in [
*get_values(UpperCAmelCase__ ),
]:
lowerCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=UpperCAmelCase__ , )
return inputs_dict
def __UpperCAmelCase ( self : Tuple ) -> Any:
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : Dict ) -> List[Any]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def __UpperCAmelCase ( self : str ) -> Union[str, Any]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase = type
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase__ )
def __UpperCAmelCase ( self : Any ) -> Dict:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase__ )
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase__ )
@slow
def __UpperCAmelCase ( self : Any ) -> Any:
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = LayoutLMvaModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
class LayoutLMvaModelIntegrationTest(unittest.TestCase):
@cached_property
def __UpperCAmelCase ( self : int ) -> str:
return LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase__ ) if is_vision_available() else None
@slow
def __UpperCAmelCase ( self : int ) -> Any:
lowerCAmelCase = LayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' ).to(UpperCAmelCase__ )
lowerCAmelCase = self.default_image_processor
lowerCAmelCase = prepare_img()
lowerCAmelCase = image_processor(images=UpperCAmelCase__ , return_tensors='pt' ).pixel_values.to(UpperCAmelCase__ )
lowerCAmelCase = torch.tensor([[1, 2]] )
lowerCAmelCase = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
lowerCAmelCase = model(
input_ids=input_ids.to(UpperCAmelCase__ ) , bbox=bbox.to(UpperCAmelCase__ ) , pixel_values=pixel_values.to(UpperCAmelCase__ ) , )
# verify the logits
lowerCAmelCase = torch.Size((1, 1_9_9, 7_6_8) )
self.assertEqual(outputs.last_hidden_state.shape , UpperCAmelCase__ )
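        # 199 = 2 text tokens + 197 visual tokens ((224 / 16) ** 2 patches + 1 CLS)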
lowerCAmelCase = torch.tensor(
[[-0.0_529, 0.3_618, 0.1_632], [-0.1_587, -0.1_667, -0.0_400], [-0.1_557, -0.1_671, -0.0_505]] ).to(UpperCAmelCase__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCAmelCase__ , atol=1E-4 ) )
| 55 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json""",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig(PretrainedConfig):
    model_type = 'convbert'

    def __init__(self, vocab_size=3_0522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.0_2, layer_norm_eps=1E-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, embedding_size=768, head_ratio=2, conv_kernel_size=9, num_groups=1, classifier_dropout=None, **kwargs):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs, )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout
class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
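
# Illustrative usage (added sketch, not part of the original module). It
# assumes the installed `transformers` package, which re-exports this class
# as `transformers.ConvBertConfig`:
#
#     from transformers import ConvBertConfig, ConvBertModel
#     config = ConvBertConfig(num_hidden_layers=6, conv_kernel_size=9)
#     model = ConvBertModel(config)  # randomly initialized ConvBERT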
| 173 |
"""simple docstring"""
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    """Return True if some subset of `arr` sums exactly to `required_sum`."""
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
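
    # Added worked example: 9 = 4 + 5 is reachable, but no subset sums to 30.
    print(is_sum_subset([3, 34, 4, 12, 5, 2], 9))   # True
    print(is_sum_subset([3, 34, 4, 12, 5, 2], 30))  # False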
| 171 | 0 |
"""simple docstring"""
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def set_seed(seed: int):
    """Seed all relevant random number generators for reproducibility."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # ^^ safe to call this function even if cuda is not available
class EMAModel:
    """
    Exponential Moving Average of model weights.
    """

    def __init__(self, parameters, decay=0.9999, min_decay=0.0, update_after_step=0, use_ema_warmup=False, inv_gamma=1.0, power=2 / 3, model_cls=None, model_config=None, **kwargs):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()
            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True

        if kwargs.get("max_value", None) is not None:
            deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead."
            deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False)
            decay = kwargs["max_value"]

        if kwargs.get("min_value", None) is not None:
            deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead."
            deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False)
            min_decay = kwargs["min_value"]

        parameters = list(parameters)
        self.shadow_params = [p.clone().detach() for p in parameters]

        if kwargs.get("device", None) is not None:
            deprecation_message = "The `device` argument is deprecated. Please use `to` instead."
            deprecate("device", "1.0.0", deprecation_message, standard_warn=False)
            self.to(device=kwargs["device"])

        self.temp_stored_params = None

        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`

        self.model_cls = model_cls
        self.model_config = model_config
    @classmethod
    def from_pretrained(cls, path, model_cls) -> "EMAModel":
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)

        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)

        ema_model.load_state_dict(ema_kwargs)
        return ema_model

    def save_pretrained(self, path):
        if self.model_cls is None:
            raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.")

        if self.model_config is None:
            raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.")

        model = self.model_cls.from_config(self.model_config)
        state_dict = self.state_dict()
        state_dict.pop("shadow_params", None)

        model.register_to_config(**state_dict)
        self.copy_to(model.parameters())
        model.save_pretrained(path)
    def get_decay(self, optimization_step: int) -> float:
        """Compute the decay factor for the exponential moving average."""
        step = max(0, optimization_step - self.update_after_step - 1)

        if step <= 0:
            return 0.0

        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)

        cur_decay_value = min(cur_decay_value, self.decay)
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value, self.min_decay)
        return cur_decay_value
    @torch.no_grad()
    def step(self, parameters):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

        parameters = list(parameters)

        self.optimization_step += 1

        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay

        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed

        for s_param, param in zip(self.shadow_params, parameters):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)

            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param))
                else:
                    s_param.copy_(param)
    def copy_to(self, parameters) -> None:
        """Copy the current averaged parameters into the given collection of parameters."""
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.to(param.device).data)

    def to(self, device=None, dtype=None) -> None:
        """Move internal buffers of the ExponentialMovingAverage to `device`."""
        # .to() on the tensors handles None correctly
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]

    def state_dict(self) -> dict:
        """Return the state of the ExponentialMovingAverage as a dict (references, not copies)."""
        return {
            "decay": self.decay,
            "min_decay": self.min_decay,
            "optimization_step": self.optimization_step,
            "update_after_step": self.update_after_step,
            "use_ema_warmup": self.use_ema_warmup,
            "inv_gamma": self.inv_gamma,
            "power": self.power,
            "shadow_params": self.shadow_params,
        }

    def store(self, parameters) -> None:
        """Save the current parameters for restoring later with `restore`."""
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]

    def restore(self, parameters) -> None:
        """Restore the parameters stored with the `store` method."""
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`")
        for c_param, param in zip(self.temp_stored_params, parameters):
            param.data.copy_(c_param.data)

        # Better memory-wise.
        self.temp_stored_params = None
    def load_state_dict(self, state_dict: dict) -> None:
        """Load the ExponentialMovingAverage state, validating every field."""
        # deepcopy, to be consistent with module API
        state_dict = copy.deepcopy(state_dict)

        self.decay = state_dict.get("decay", self.decay)
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1")

        self.min_decay = state_dict.get("min_decay", self.min_decay)
        if not isinstance(self.min_decay, float):
            raise ValueError("Invalid min_decay")

        self.optimization_step = state_dict.get("optimization_step", self.optimization_step)
        if not isinstance(self.optimization_step, int):
            raise ValueError("Invalid optimization_step")

        self.update_after_step = state_dict.get("update_after_step", self.update_after_step)
        if not isinstance(self.update_after_step, int):
            raise ValueError("Invalid update_after_step")

        self.use_ema_warmup = state_dict.get("use_ema_warmup", self.use_ema_warmup)
        if not isinstance(self.use_ema_warmup, bool):
            raise ValueError("Invalid use_ema_warmup")

        self.inv_gamma = state_dict.get("inv_gamma", self.inv_gamma)
        if not isinstance(self.inv_gamma, (float, int)):
            raise ValueError("Invalid inv_gamma")

        self.power = state_dict.get("power", self.power)
        if not isinstance(self.power, (float, int)):
            raise ValueError("Invalid power")

        shadow_params = state_dict.get("shadow_params", None)
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list):
                raise ValueError("shadow_params must be a list")
            if not all(isinstance(p, torch.Tensor) for p in self.shadow_params):
                raise ValueError("shadow_params must all be Tensors")
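
# Illustrative usage (added sketch; the tiny model and training loop are
# hypothetical, not part of the original module):
#
#     model = torch.nn.Linear(4, 4)
#     ema = EMAModel(model.parameters(), decay=0.999)
#     for _ in range(num_steps):
#         ...  # forward / backward / optimizer.step()
#         ema.step(model.parameters())   # update the shadow weights
#     ema.store(model.parameters())      # stash the raw weights
#     ema.copy_to(model.parameters())    # evaluate with the averaged weights
#     ema.restore(model.parameters())    # put the raw weights back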
| 371 |
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
lowerCAmelCase__ = namedtuple(
'''_TestCommandArgs''',
[
'''dataset''',
'''name''',
'''cache_dir''',
'''data_dir''',
'''all_configs''',
'''save_infos''',
'''ignore_verifications''',
'''force_redownload''',
'''clear_cache''',
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_1percent_close(source, target):
    """Check that the relative difference between `source` and `target` is below 1%."""
    return (abs(source - target) / target) < 0.01


@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    dataset_info_path = os.path.join(dataset_loading_script_dir, "README.md")
    assert os.path.exists(dataset_info_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
{
"default": DatasetInfo(
features=Features(
{
"tokens": Sequence(Value("string" ) ),
"ner_tags": Sequence(
ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] ) ),
"langs": Sequence(Value("string" ) ),
"spans": Sequence(Value("string" ) ),
} ) , splits=[
{
"name": "train",
"num_bytes": 2_351_563,
"num_examples": 10_000,
},
{
"name": "validation",
"num_bytes": 238_418,
"num_examples": 1_000,
},
] , download_size=3_940_680 , dataset_size=2_589_981 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(
            expected_dataset_infos["default"], key
        )
        if key == "num_bytes":
            assert is_1percent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            # the bare `result == expected` in the original discarded its result;
            # assert so the comparison actually checks something
            assert result == expected
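    # Note (added): the 1% tolerance absorbs small num_bytes drift across
    # `datasets` versions, e.g. is_1percent_close(2_351_563, 2_360_000) is True.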
| 244 | 0 |
"""simple docstring"""
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
('time_embed.0.weight', 'time_embedding.linear_1.weight'),
('time_embed.0.bias', 'time_embedding.linear_1.bias'),
('time_embed.2.weight', 'time_embedding.linear_2.weight'),
('time_embed.2.bias', 'time_embedding.linear_2.bias'),
('input_blocks.0.0.weight', 'conv_in.weight'),
('input_blocks.0.0.bias', 'conv_in.bias'),
('out.0.weight', 'conv_norm_out.weight'),
('out.0.bias', 'conv_norm_out.bias'),
('out.2.weight', 'conv_out.weight'),
('out.2.bias', 'conv_out.bias'),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
('in_layers.0', 'norm1'),
('in_layers.2', 'conv1'),
('out_layers.0', 'norm2'),
('out_layers.3', 'conv2'),
('emb_layers.1', 'time_emb_proj'),
('skip_connection', 'conv_shortcut'),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks

    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
    # buyer beware: this is a *brittle* function,
    # and correct output requires that all of these pieces interact in
    # the exact order in which I have arranged them.
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
# (stable-diffusion, HF Diffusers)
('nin_shortcut', 'conv_shortcut'),
('norm_out', 'conv_norm_out'),
('mid.attn_1.', 'mid_block.attentions.0.'),
]
for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
# (stable-diffusion, HF Diffusers)
('norm.', 'group_norm.'),
('q.', 'query.'),
('k.', 'key.'),
('v.', 'value.'),
('proj_out.', 'proj_attn.'),
]
def reshape_weight_for_sd(w):
    # convert HF linear weights to SD conv2d weights
    return w.reshape(*w.shape, 1, 1)


def convert_vae_state_dict(vae_state_dict):
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
('resblocks.', 'text_model.encoder.layers.'),
('ln_1', 'layer_norm1'),
('ln_2', 'layer_norm2'),
('.c_fc.', '.fc1.'),
('.c_proj.', '.fc2.'),
('.attn', '.self_attn'),
('ln_final.', 'transformer.text_model.final_layer_norm.'),
('token_embedding.weight', 'transformer.text_model.embeddings.token_embedding.weight'),
('positional_embedding', 'transformer.text_model.embeddings.position_embedding.weight'),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}
def convert_text_enc_state_dict_v20(text_enc_dict):
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith('.self_attn.q_proj.weight')
            or k.endswith('.self_attn.k_proj.weight')
            or k.endswith('.self_attn.v_proj.weight')
        ):
            k_pre = k[: -len('.q_proj.weight')]
            k_code = k[-len('q_proj.weight')]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith('.self_attn.q_proj.bias')
            or k.endswith('.self_attn.k_proj.bias')
            or k.endswith('.self_attn.v_proj.bias')
        ):
            k_pre = k[: -len('.q_proj.bias')]
            k_code = k[-len('q_proj.bias')]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception('CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing')
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + '.in_proj_weight'] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception('CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing')
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + '.in_proj_bias'] = torch.cat(tensors)

    return new_state_dict


def convert_text_enc_state_dict(text_enc_dict):
    return text_enc_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--use_safetensors', action='store_true', help='Save weights use safetensors, default is ckpt.'
)
    args = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
    # Path for safetensors
    unet_path = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.safetensors')
    vae_path = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.safetensors')
    text_enc_path = osp.join(args.model_path, 'text_encoder', 'model.safetensors')

    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device='cpu')
    else:
        unet_path = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.bin')
        unet_state_dict = torch.load(unet_path, map_location='cpu')

    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device='cpu')
    else:
        vae_path = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.bin')
        vae_state_dict = torch.load(vae_path, map_location='cpu')

    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device='cpu')
    else:
        text_enc_path = osp.join(args.model_path, 'text_encoder', 'pytorch_model.bin')
        text_enc_dict = torch.load(text_enc_path, map_location='cpu')

    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {'model.diffusion_model.' + k: v for k, v in unet_state_dict.items()}

    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {'first_stage_model.' + k: v for k, v in vae_state_dict.items()}

    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_v20_model = 'text_model.encoder.layers.22.layer_norm2.bias' in text_enc_dict

    if is_v20_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {'transformer.' + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict)
        text_enc_dict = {'cond_stage_model.model.' + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {'cond_stage_model.transformer.' + k: v for k, v in text_enc_dict.items()}

    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}

    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {'state_dict': state_dict}
        torch.save(state_dict, args.checkpoint_path)
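
    # Example invocation (added illustration; the script name and paths are
    # placeholders):
    #
    #   python convert_diffusers_to_original_stable_diffusion.py \
    #       --model_path ./my-diffusers-model \
    #       --checkpoint_path ./model.safetensors \
    #       --half --use_safetensors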
| 288 |
"""simple docstring"""
def stooge_sort(arr: list) -> list:
    """Sort `arr` in place using stooge sort and return it."""
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr: list, i: int, h: int) -> None:
    if i >= h:
        return

    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]

    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3

        # Recursively sort the first 2/3 of the elements
        stooge(arr, i, h - t)

        # Recursively sort the last 2/3 of the elements
        stooge(arr, i + t, h)

        # Recursively sort the first 2/3 of the elements again
        stooge(arr, i, h - t)
if __name__ == "__main__":
UpperCAmelCase__ = input('Enter numbers separated by a comma:\n').strip()
UpperCAmelCase__ = [int(item) for item in user_input.split(',')]
print(stooge_sort(unsorted))
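
    # Added sanity check (deterministic example); stooge sort runs in
    # O(n^2.71), so keep inputs small.
    assert stooge_sort([2, 4, 5, 3, 1]) == [1, 2, 3, 4, 5]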
| 288 | 1 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 650, "eval_accuracy": 0.6, "eval_loss": 0.9},
},
{
"framework": "tensorflow",
"script": "run_tf.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.3, "eval_loss": 0.9},
},
] )
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='utf-8' , check=UpperCAmelCase_ , )
assert hasattr(self , 'env')
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : str=1):
"""simple docstring"""
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-single""" , instance_count=UpperCAmelCase_ , instance_type=self.instance_type , debugger_hook_config=UpperCAmelCase_ , hyperparameters={**self.env.hyperparameters, 'model_name_or_path': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version='py36' , )
def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
TrainingJobAnalytics(UpperCAmelCase_).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""")
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
a : int = self.create_estimator()
# run training
estimator.fit()
# result dataframe
a : Any = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
# extract kpis
a : List[Any] = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'])
a : List[str] = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'])
# get train time from SageMaker job, this includes starting, preprocessing, stopping
a : List[Any] = (
Session().describe_training_job(estimator.latest_training_job.name).get('TrainingTimeInSeconds' , 9_9_9_9_9_9)
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy)
assert all(t <= self.results['eval_loss'] for t in eval_loss)
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , 'w') as outfile:
json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} , UpperCAmelCase_)
| 354 | '''simple docstring'''
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, scope=None, encoder_stride=2):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as ViT does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224') if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224').to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_interpolate_pos_encoding(self):
        # ViT models have an `interpolate_pos_encoding` argument in their forward method,
        # allowing to interpolate the pre-trained position embeddings in order to use
        # the model on higher resolutions.
        model = ViTModel.from_pretrained('facebook/dino-vits8').to(torch_device)

        image_processor = ViTImageProcessor.from_pretrained('facebook/dino-vits8', size=480)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt')
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values, interpolate_pos_encoding=True)

        # verify the logits
        expected_shape = torch.Size((1, 3601, 384))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        """A small test to make sure that inference works in half precision without any problem."""
        model = ViTModel.from_pretrained('facebook/dino-vits8', torch_dtype=torch.float16, device_map='auto')
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt')
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
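
# Note (added): with 480x480 inputs and an 8x8 patch size, the interpolated
# sequence length is (480 // 8) ** 2 + 1 = 3601 tokens, which is exactly the
# (1, 3601, 384) shape asserted in test_inference_interpolate_pos_encoding.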
| 345 | 0 |
'''simple docstring'''
from collections import Counter

import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split

data = datasets.load_iris()

X = np.array(data['data'])
y = np.array(data['target'])
classes = data['target_names']

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    """Return the euclidean distance between two points."""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    """Classify `point` with the k-nearest-neighbours algorithm."""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
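
    # Added illustration: rough hold-out accuracy of the 5-NN classifier.
    predictions = [classifier(X_train, y_train, classes, point) for point in X_test]
    accuracy = np.mean([pred == classes[target] for pred, target in zip(predictions, y_test)])
    print(f'accuracy: {accuracy:.2f}')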
| 258 |
'''simple docstring'''
from math import asin, atan, cos, radians, sin, sqrt, tan

# CONSTANTS per WGS84 https://en.wikipedia.org/wiki/World_Geodetic_System
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """
    Calculate the great-circle distance between two points on a sphere,
    given their longitudes and latitudes
    (https://en.wikipedia.org/wiki/Haversine_formula). Returns metres.
    """
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
if __name__ == "__main__":
import doctest
doctest.testmod()
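
    # Added illustration (San Francisco to Yosemite, roughly 254 km):
    print(f'{haversine_distance(37.774856, -122.424227, 37.864742, -119.537521):0,.0f} meters')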
| 258 | 1 |
'''simple docstring'''
from PIL import Image
def mean_threshold(image: Image) -> Image:
    """Binarize a greyscale PIL image, using its mean pixel value as the threshold."""
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
if __name__ == "__main__":
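    # Added self-contained demo with a synthetic 2x1 image, so the example
    # can run without an input file: the two-level gradient collapses to
    # pure black/white.
    demo = Image.new('L', (2, 1))
    demo.putdata([50, 200])
    print(list(mean_threshold(demo).getdata()))  # [0, 255]
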
    image = mean_threshold(Image.open('path_to_image').convert('L'))
    image.save('output_image_path')
| 9 |
'''simple docstring'''
from math import sqrt
def is_prime(number):
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must be of type bool"

    return status
def sieve_er(n):
    """Sieve of Eratosthenes: return all primes from 2 up to n (inclusive)."""
    assert isinstance(n, int) and (n > 2), "'n' must be an int and > 2"

    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))

    ans = []  # this list will be returned.

    # actual sieve of Eratosthenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must be of type list"

    return ans
def get_prime_numbers(n):
    """Return all primes between 2 and n by trial division (compare sieve_er)."""
    assert isinstance(n, int) and (n > 2), "'n' must be an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must be of type list"

    return ans
def prime_factorization(number):
    """Return the prime factorization of 'number' as a list of prime factors."""
    assert isinstance(number, int) and number >= 0, "'number' must be an int and >= 0"

    ans = []  # this list will be returned by the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)

    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1

    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must be of type list"

    return ans
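

# Added worked examples:
#   is_prime(97)             -> True
#   sieve_er(20)             -> [2, 3, 5, 7, 11, 13, 17, 19]
#   prime_factorization(287) -> [7, 41]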
def greatest_prime_factor(number):
    """Return the greatest prime factor of 'number' (number >= 0)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)

    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must be of type int"

    return ans


def smallest_prime_factor(number):
    """Return the smallest prime factor of 'number' (number >= 0)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)

    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must be of type int"

    return ans
def is_even(number):
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 == 0, bool), "compare must be of type bool"

    return number % 2 == 0


def is_odd(number):
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 != 0, bool), "compare must be of type bool"

    return number % 2 != 0
def __magic_name__( lowerCamelCase):
assert (
isinstance(lowerCamelCase, lowerCamelCase) and (number > 2) and is_even(lowerCamelCase)
), "'number' must been an int, even and > 2"
__lowerCAmelCase = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
__lowerCAmelCase = get_prime_numbers(lowerCamelCase)
__lowerCAmelCase = len(lowerCamelCase)
# run variable for while-loops.
__lowerCAmelCase = 0
__lowerCAmelCase = None
# exit variable. for break up the loops
__lowerCAmelCase = True
while i < len_pn and loop:
__lowerCAmelCase = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
__lowerCAmelCase = False
ans.append(prime_numbers[i])
ans.append(prime_numbers[j])
j += 1
i += 1
# precondition
assert (
isinstance(lowerCamelCase, lowerCamelCase)
and (len(lowerCamelCase) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0])
and is_prime(ans[1])
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def gcd(number1, number2):
    """Greatest common divisor via the Euclidean algorithm."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must be positive integers."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must be an int and positive"

    return number1
def __magic_name__( lowerCamelCase, lowerCamelCase):
assert (
isinstance(lowerCamelCase, lowerCamelCase)
and isinstance(lowerCamelCase, lowerCamelCase)
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
__lowerCAmelCase = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
__lowerCAmelCase = prime_factorization(lowerCamelCase)
__lowerCAmelCase = prime_factorization(lowerCamelCase)
elif numbera == 1 or numbera == 1:
__lowerCAmelCase = []
__lowerCAmelCase = []
__lowerCAmelCase = max(lowerCamelCase, lowerCamelCase)
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = [] # captured numbers int both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
__lowerCAmelCase = prime_fac_a.count(lowerCamelCase)
__lowerCAmelCase = prime_fac_a.count(lowerCamelCase)
for _ in range(max(lowerCamelCase, lowerCamelCase)):
ans *= n
else:
__lowerCAmelCase = prime_fac_a.count(lowerCamelCase)
for _ in range(lowerCamelCase):
ans *= n
done.append(lowerCamelCase)
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
__lowerCAmelCase = prime_fac_a.count(lowerCamelCase)
for _ in range(lowerCamelCase):
ans *= n
done.append(lowerCamelCase)
# precondition
assert isinstance(lowerCamelCase, lowerCamelCase) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def __magic_name__( lowerCamelCase):
assert isinstance(lowerCamelCase, lowerCamelCase) and (n >= 0), "'number' must been a positive int"
__lowerCAmelCase = 0
__lowerCAmelCase = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(lowerCamelCase):
ans += 1
# precondition
assert isinstance(lowerCamelCase, lowerCamelCase) and is_prime(
lowerCamelCase), "'ans' must been a prime number and from type int"
return ans
def __magic_name__( lowerCamelCase, lowerCamelCase):
assert (
is_prime(lowerCamelCase) and is_prime(lowerCamelCase) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
__lowerCAmelCase = p_number_a + 1 # jump to the next number
__lowerCAmelCase = [] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
while not is_prime(lowerCamelCase):
number += 1
while number < p_number_a:
ans.append(lowerCamelCase)
number += 1
# fetch the next prime number.
while not is_prime(lowerCamelCase):
number += 1
# precondition
assert (
isinstance(lowerCamelCase, lowerCamelCase)
and ans[0] != p_number_a
and ans[len(lowerCamelCase) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def __magic_name__( lowerCamelCase):
assert isinstance(lowerCamelCase, lowerCamelCase) and (n >= 1), "'n' must been int and >= 1"
__lowerCAmelCase = [] # will be returned.
for divisor in range(1, n + 1):
if n % divisor == 0:
ans.append(lowerCamelCase)
# precondition
assert ans[0] == 1 and ans[len(lowerCamelCase) - 1] == n, "Error in function getDivisiors(...)"
return ans
def __magic_name__( lowerCamelCase):
assert isinstance(lowerCamelCase, lowerCamelCase) and (
number > 1
), "'number' must been an int and >= 1"
__lowerCAmelCase = get_divisors(lowerCamelCase)
# precondition
assert (
isinstance(lowerCamelCase, lowerCamelCase)
and (divisors[0] == 1)
and (divisors[len(lowerCamelCase) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1]) == number
def __magic_name__( lowerCamelCase, lowerCamelCase):
assert (
isinstance(lowerCamelCase, lowerCamelCase)
and isinstance(lowerCamelCase, lowerCamelCase)
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
__lowerCAmelCase = gcd(abs(lowerCamelCase), abs(lowerCamelCase))
# precondition
assert (
isinstance(lowerCamelCase, lowerCamelCase)
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def factorial(n):
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"

    ans = 1  # this will be returned.

    for factor in range(1, n + 1):
        ans *= factor

    return ans


def fib(n):
    """Return the n-th Fibonacci number, offset so that fib(0) == fib(1) == 1."""
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"

    tmp = 0
    fib1 = 1
    ans = 1  # this will be returned

    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp

    return ans
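

# Added sanity examples: factorial(5) -> 120; fib(5) -> 8
# (this fib is offset so that fib(0) == fib(1) == 1).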
| 9 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images for the processor tests."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def lowercase_ (self : Tuple ) -> int:
"""simple docstring"""
UpperCAmelCase__ = self.get_tokenizer()
UpperCAmelCase__ = self.get_rust_tokenizer()
UpperCAmelCase__ = self.get_image_processor()
UpperCAmelCase__ = OwlViTProcessor(tokenizer=_A , image_processor=_A )
processor_slow.save_pretrained(self.tmpdirname )
UpperCAmelCase__ = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=_A )
UpperCAmelCase__ = OwlViTProcessor(tokenizer=_A , image_processor=_A )
processor_fast.save_pretrained(self.tmpdirname )
UpperCAmelCase__ = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _A )
self.assertIsInstance(processor_fast.tokenizer , _A )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _A )
self.assertIsInstance(processor_fast.image_processor , _A )
def lowercase_ (self : Union[str, Any] ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase__ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
UpperCAmelCase__ = self.get_image_processor(do_normalize=_A )
UpperCAmelCase__ = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=_A )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _A )
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str, return_tensors="np")
        encoded_tok = tokenizer(input_str, return_tensors="np")

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_processor_with_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)

        seq_length = 16
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_processor_with_nested_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts)

        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max([len(texts) for texts in input_texts])

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_processor_case(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts)

        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])
    def test_processor_case2(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, query_images=query_input)

        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
| 65 |
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        """Dummy Image class so the module can be imported without vision deps."""

        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]
def mask_to_test_readable(mask) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )

    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        pass
@require_tf
@unittest.skip('Image segmentation not implemented in TF' )
    def test_small_model_tf(self):
        pass
@slow
@require_torch
    def test_integration_torch_image_segmentation(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")
        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_4_4_4},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_2_1},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_1_6_7},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_1_3_2},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_0_5_3},
{'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.9_9_6_7},
{'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.9_9_3},
{'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.9_9_0_9},
{'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.9_8_7_9},
{'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.9_8_3_4},
{'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.9_7_1_6},
{'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.9_6_1_2},
{'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.9_5_9_9},
{'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.9_5_5_2},
{'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.9_5_3_2},
{'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.9_5_1_6},
{'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.9_4_9_9},
{'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.9_4_8_3},
{'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.9_4_6_4},
{'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.9_4_3},
{'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.9_4_3},
{'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.9_4_0_8},
{'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.9_3_3_5},
{'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.9_3_2_6},
{'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.9_2_6_2},
{'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.8_9_9_9},
{'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.8_9_8_6},
{'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.8_9_8_4},
{'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.8_8_7_3},
{'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.8_8_7_1}
] , )
# fmt: on
@require_torch
@slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)
        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256
        )

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_4_4_4},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_2_1_0},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_1_6_7},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_1_3_2},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_0_5_3},
] , )
| 280 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_time_series_transformer""": [
"""TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TimeSeriesTransformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
"""TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TimeSeriesTransformerForPrediction""",
"""TimeSeriesTransformerModel""",
"""TimeSeriesTransformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 46 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}

logger = logging.get_logger(__name__)
class PegasusTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        sp_model_kwargs=None,
        **kwargs,
    ):
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            pad_token=pad_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # add special tokens to encoder dict
        self.encoder = {
            0: self.pad_token,
            1: self.eos_token,
        }
        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                }
            )
        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})

        self.decoder = {v: k for k, v in self.encoder.items()}
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.offset

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
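    # Token ids 0 .. offset-1 are reserved for the special tokens stored in
    # self.encoder, so the sentencepiece ids handled by the conversion helpers
    # below are shifted up by self.offset.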
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token
    def convert_tokens_to_string(self, tokens) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def num_special_tokens_to_add(self, pair=False) -> int:
        """Just EOS"""
        return 1
    def _special_token_mask(self, seq) -> List[int]:
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
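# Minimal usage sketch (the spiece.model path is hypothetical):
#   tokenizer = PegasusTokenizer(vocab_file="spiece.model")
#   ids = tokenizer("Summarize this.")["input_ids"]  # ends with eos_token_id == 1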
| 46 | 1 |
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples
    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)

            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )
    @require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )

        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )
    @require_tf
    def test_small_model_tf(self):
        pass
| 12 |
"""simple docstring"""
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    """A pure-Python vector with the basic linear-algebra operations."""

    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        return len(self.__components)

    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        return Vector(self.__components)

    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)
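# angle() implements cos(theta) = (v * w) / (|v| * |w|); for example,
# Vector([1, 0]).angle(Vector([0, 1])) is pi / 2 (or 90 with deg=True).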
def zero_vector(dimension: int) -> Vector:
    """Returns a zero vector of the given dimension."""
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    """Returns a unit basis vector with a one at index pos."""
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    """Computes the axpy operation: scalar * x + y."""
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    """Returns a random vector of size n with integer components in [a, b]."""
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
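# Example: axpy(2, Vector([1, 2]), Vector([3, 4])) computes 2 * x + y = Vector([5, 8]).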
class Matrix:
    """A pure-Python matrix with the basic linear-algebra operations."""

    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("change_component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)


def square_zero_matrix(n: int) -> Matrix:
    """Returns a square zero-matrix of dimension n x n."""
    ans = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    """Returns a random matrix with integer components in [a, b]."""
    random.seed(None)
    matrix = [[random.randint(a, b) for _ in range(width)] for _ in range(height)]
    return Matrix(matrix, width, height)
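# Note: determinant() expands along the first row (Laplace expansion), which is
# O(n!) — acceptable for the small teaching-sized matrices this module targets.
# e.g. Matrix([[1, 2], [3, 4]], 2, 2).determinant() == -2.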
| 113 | 0 |
"""simple docstring"""
def rank_of_matrix(matrix: list[list[int | float]]) -> int:
    """Find the rank of a matrix using in-place Gaussian elimination."""
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)

    for row in range(rank):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]

            # Reduce the row pointer by one to stay on the same row
            row -= 1

    return rank
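# Example: rank_of_matrix([[1, 2], [2, 4]]) == 1, since the second row is a
# multiple of the first and gets eliminated by the row reduction above.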
if __name__ == "__main__":
import doctest
doctest.testmod()
| 85 |
"""simple docstring"""
import os
from pathlib import Path
def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )
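    # The import below only works after the JIT build above has finished; a CUDA
    # toolchain must therefore be available when this function is first called.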
import MultiScaleDeformableAttention as MSDA
return MSDA
| 85 | 1 |
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    """A stack whose ordering is given by its top (last) element."""

    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stacks = Stack([element])
        i = bisect_left(stacks, new_stacks)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stacks)

    # use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
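# Patience sort runs in O(n log n): each bisect_left over the stacks costs
# O(log n) and heapq.merge combines the sorted stacks lazily at the end.
# Example: patience_sort([1, 9, 5, 21, 17, 6]) -> [1, 5, 6, 9, 17, 21].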
if __name__ == "__main__":
lowerCAmelCase : Union[str, Any] = input('Enter numbers separated by a comma:\n').strip()
lowerCAmelCase : List[str] = [int(item) for item in user_input.split(',')]
print(patience_sort(unsorted))
| 253 |
__version__ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 253 | 1 |
"""simple docstring"""
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")
@total_ordering
@dataclass
class Version:
    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str


def _str_to_version_tuple(version_str):
    """Return the (major, minor, patch) tuple extracted from a version string."""
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    """Version tuple to str."""
    return ".".join(str(v) for v in version_tuple)
| 76 |
"""simple docstring"""
import base64


def ascii85_encode(string: str) -> bytes:
    return base64.a85encode(string.encode("utf-8"))


def ascii85_decode(a85encoded: bytes) -> str:
    return base64.a85decode(a85encoded).decode("utf-8")
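# Round trip example: ascii85_decode(ascii85_encode("Hello")) == "Hello".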
if __name__ == "__main__":
import doctest
doctest.testmod()
| 76 | 1 |
from __future__ import annotations
def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    """Divide number_of_bytes into the given number of byte ranges."""
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list
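# Example: allocation_num(16647, 4) -> ["1-4161", "4162-8322", "8323-12483", "12484-16647"];
# only the last partition absorbs the remainder bytes.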
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 312 |
"""simple docstring"""
import warnings
from ..trainer import Trainer
from ..utils import logging
lowercase_ = logging.get_logger(__name__)
class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
| 45 | 0 |
"""simple docstring"""
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)
class BarkProcessor(ProcessorMixin):
    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]

    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }

    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)
        self.speaker_embeddings = speaker_embeddings
    @classmethod
    def from_pretrained(
        cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path="speaker_embeddings_path.json", **kwargs
    ):
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path,
                speaker_embeddings_dict_path,
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if speaker_embeddings_path is None:
                logger.warning(
                    f"""`{os.path.join(pretrained_processor_name_or_path, speaker_embeddings_dict_path)}` does not exists
                    , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
                    dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`."""
                )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json)
        else:
            speaker_embeddings = None

        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path, **kwargs)

        return cls(tokenizer=tokenizer, speaker_embeddings=speaker_embeddings)
    def save_pretrained(
        self,
        save_directory,
        speaker_embeddings_dict_path="speaker_embeddings_path.json",
        speaker_embeddings_directory="speaker_embeddings",
        push_to_hub=False,
        **kwargs,
    ):
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(save_directory, speaker_embeddings_directory, "v2"), exist_ok=True)

            embeddings_dict = {}
            embeddings_dict["repo_or_path"] = save_directory

            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    voice_preset = self._load_voice_preset(prompt_key)

                    tmp_dict = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict["repo_or_path"], speaker_embeddings_directory, f"{prompt_key}_{key}"
                            ),
                            voice_preset[key],
                            allow_pickle=False,
                        )
                        tmp_dict[key] = os.path.join(speaker_embeddings_directory, f"{prompt_key}_{key}.npy")

                    embeddings_dict[prompt_key] = tmp_dict

            with open(os.path.join(save_directory, speaker_embeddings_dict_path), "w") as fp:
                json.dump(embeddings_dict, fp)

        super().save_pretrained(save_directory, push_to_hub, **kwargs)
    def _load_voice_preset(self, voice_preset=None, **kwargs):
        voice_preset_paths = self.speaker_embeddings[voice_preset]

        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]."
                )

            path = get_file_from_repo(
                self.speaker_embeddings.get("repo_or_path", "/"),
                voice_preset_paths[key],
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if path is None:
                raise ValueError(
                    f"""`{os.path.join(self.speaker_embeddings.get("repo_or_path", "/"), voice_preset_paths[key])}` does not exists
                    , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
                    embeddings."""
                )

            voice_preset_dict[key] = np.load(path)

        return voice_preset_dict
    def _validate_voice_preset_dict(self, voice_preset=None, **kwargs):
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f"Voice preset unrecognized, missing {key} as a key.")

            if not isinstance(voice_preset[key], np.ndarray):
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

            if len(voice_preset[key].shape) != self.preset_shape[key]:
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")
    def __call__(
        self,
        text=None,
        voice_preset=None,
        return_tensors="pt",
        max_length=256,
        add_special_tokens=False,
        return_attention_mask=True,
        return_token_type_ids=False,
        **kwargs,
    ):
        if voice_preset is not None and not isinstance(voice_preset, dict):
            if (
                isinstance(voice_preset, str)
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset)
            else:
                if isinstance(voice_preset, str) and not voice_preset.endswith(".npz"):
                    voice_preset = voice_preset + ".npz"
                voice_preset = np.load(voice_preset)

        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset, **kwargs)
            voice_preset = BatchFeature(data=voice_preset, tensor_type=return_tensors)

        encoded_text = self.tokenizer(
            text,
            return_tensors=return_tensors,
            padding="max_length",
            max_length=max_length,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            add_special_tokens=add_special_tokens,
            **kwargs,
        )

        if voice_preset is not None:
            encoded_text["history_prompt"] = voice_preset

        return encoded_text
| 363 |
"""simple docstring"""
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    """A stack whose ordering is given by its top (last) element."""

    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stacks = Stack([element])
        i = bisect_left(stacks, new_stacks)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stacks)

    # use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
if __name__ == "__main__":
__snake_case : Union[str, Any] = input('Enter numbers separated by a comma:\n').strip()
__snake_case : Optional[int] = [int(item) for item in user_input.split(',')]
    print(patience_sort(unsorted))
| 58 | 0 |
'''simple docstring'''
def naive_pattern_search(s: str, pattern: str) -> list:
    """Return every index at which pattern occurs in s."""
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
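# Worst case is O((n - m + 1) * m) character comparisons for a text of length n
# and a pattern of length m, e.g. searching "AAB" in "AAAAAA".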
if __name__ == "__main__":
assert naive_pattern_search('''ABCDEFG''', '''DE''') == [3]
print(naive_pattern_search('''ABAAABCDBBABCDDEBCABC''', '''ABC'''))
| 223 |
'''simple docstring'''
def and_gate(input_1: int, input_2: int) -> int:
    """Calculate the AND of two input bits."""
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    """Tests the and_gate function."""
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
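# and_gate works because a two-input AND is 1 exactly when neither input is 0,
# which is what the tuple's count of zeros checks.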
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 223 | 1 |
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
"169M": 12,
"430M": 24,
"1B5": 24,
"3B": 32,
"7B": 32,
"14B": 40,
}
HIDEN_SIZE_MAPPING = {
"169M": 768,
"430M": 1_024,
"1B5": 2_048,
"3B": 2_560,
"7B": 4_096,
"14B": 5_120,
}
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")

        if name != "head.weight":
            name = "rwkv." + name

        state_dict[name] = weight
    return state_dict
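# Example key rewrite: "blocks.0.att.time_mix_k" becomes
# "rwkv.blocks.0.attention.time_mix_key" after the renames above.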
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size, num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size], hidden_size=HIDEN_SIZE_MAPPING[size]
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error, it this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
)
parser.add_argument(
"--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
)
parser.add_argument(
"--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
)
parser.add_argument(
"--tokenizer_file",
default=None,
type=str,
help="Path to the tokenizer file to use (if not provided, only the model is converted).",
)
parser.add_argument(
"--size",
default=None,
type=str,
help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Push to the Hub the converted model.",
)
parser.add_argument(
"--model_name",
default=None,
type=str,
help="Name of the pushed model on the Hub, including the username / organization.",
)
    args = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 198 |
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    """Check primality of a number in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    """Generate the sequence of prime numbers."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2000000) -> int:
    """Return the sum of all primes below n (Project Euler problem 10)."""
    return sum(takewhile(lambda x: x < n, prime_generator()))
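# solution(10) == 17, since the primes below 10 are 2, 3, 5 and 7.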
if __name__ == "__main__":
print(F'''{solution() = }''')
| 198 | 1 |
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
"""wmt19-ru-en""": {"""length_penalty""": 1.1},
"""wmt19-en-ru""": {"""length_penalty""": 1.1_5},
"""wmt19-en-de""": {"""length_penalty""": 1.0},
"""wmt19-de-en""": {"""length_penalty""": 1.1},
# allenai:
"""wmt16-en-de-dist-12-1""": {"""length_penalty""": 0.6},
"""wmt16-en-de-dist-6-1""": {"""length_penalty""": 0.6},
"""wmt16-en-de-12-1""": {"""length_penalty""": 0.8},
"""wmt19-de-en-6-6-base""": {"""length_penalty""": 0.6},
"""wmt19-de-en-6-6-big""": {"""length_penalty""": 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
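# e.g. rewrite_dict_keys({"le@@": 5, "er": 7, "<unk>": 3}) maps "le@@" -> "le"
# and "er" -> "er</w>" while keeping the special token "<unk>" unchanged.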
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)

    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
    data_name_or_path = "."
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f"using checkpoint {checkpoint_file}")
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs
    )

    args = vars(chkpt["args"]["model"])

    src_lang = args["source_lang"]
    tgt_lang = args["target_lang"]

    data_root = dirname(pytorch_dump_folder_path)
    model_dir = basename(pytorch_dump_folder_path)

    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f"dict.{src_lang}.txt")
    tgt_dict_file = os.path.join(fsmt_folder_path, f"dict.{tgt_lang}.txt")

    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-src.json")
    print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break

    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json")
    print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records")
    with open(tgt_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding="utf-8") as fin:
        merges = fin.read()
    merges = re.sub(r" \d+$", "", merges, 0, re.M)  # remove frequency number
    print(f"Generating {merges_file}")
    with open(merges_file, "w", encoding="utf-8") as fout:
        fout.write(merges)

    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
    assert args["tokenizer"] == "moses", f"need to extend tokenizer to support tokenizer={args['tokenizer']}"

    model_conf = {
        "architectures": ["FSMTForConditionalGeneration"],
        "model_type": "fsmt",
        "activation_dropout": args["activation_dropout"],
        "activation_function": "relu",
        "attention_dropout": args["attention_dropout"],
        "d_model": args["decoder_embed_dim"],
        "dropout": args["dropout"],
        "init_std": 0.02,
        "max_position_embeddings": args["max_source_positions"],
        "num_hidden_layers": args["encoder_layers"],
        "src_vocab_size": src_vocab_size,
        "tgt_vocab_size": tgt_vocab_size,
        "langs": [src_lang, tgt_lang],
        "encoder_attention_heads": args["encoder_attention_heads"],
        "encoder_ffn_dim": args["encoder_ffn_embed_dim"],
        "encoder_layerdrop": args["encoder_layerdrop"],
        "encoder_layers": args["encoder_layers"],
        "decoder_attention_heads": args["decoder_attention_heads"],
        "decoder_ffn_dim": args["decoder_ffn_embed_dim"],
        "decoder_layerdrop": args["decoder_layerdrop"],
        "decoder_layers": args["decoder_layers"],
        "bos_token_id": 0,
        "pad_token_id": 1,
        "eos_token_id": 2,
        "is_encoder_decoder": True,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_all_embeddings"],
    }

    # good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0

    print(f"Generating {fsmt_model_config_file}")
    with open(fsmt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "langs": [src_lang, tgt_lang],
        "model_max_length": 1024,
        "do_lower_case": do_lower_case,
    }

    print(f"Generating {fsmt_tokenizer_config_file}")
    with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()

    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())

    # remove unneeded keys
    ignore_keys = [
        "model.model",
        "model.encoder.version",
        "model.decoder.version",
        "model.encoder_embed_tokens.weight",
        "model.decoder_embed_tokens.weight",
        "model.encoder.embed_positions._float_tensor",
        "model.decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
    print("\nLast step is to upload the files to s3")
    print(f"cd {data_root}")
    print(f"transformers-cli upload {model_dir}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fsmt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
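# Example invocation (paths are illustrative assumptions; the checkpoint dir
# must also contain the dict.*.txt and bpecodes files mentioned in the help):
#   python convert_fsmt_original_pytorch_checkpoint_to_pytorch.py \
#       --fsmt_checkpoint_path fsmt/wmt19.ru-en.ensemble/model4.pt \
#       --pytorch_dump_folder_path data/wmt19-ru-en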
| 259 |
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24],
        is_hybrid=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
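        # With the defaults above this gives (32 // 16) ** 2 + 1 = 5 positions,
        # i.e. 4 patch tokens plus the [CLS] token.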
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }

        return DPTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            backbone_out_indices=self.backbone_out_indices,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            is_hybrid=self.is_hybrid,
            backbone_config=backbone_config,
            backbone_featmap_shape=self.backbone_featmap_shape,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DPT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
| 259 | 1 |
import argparse

import torch

from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)

logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
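# Example invocation (all paths and the base model name are illustrative
# assumptions, not values from the original script):
#   python convert_wav2vec2_s3prl_original_pytorch_checkpoint_to_pytorch.py \
#       --base_model_name facebook/wav2vec2-base \
#       --config_path ./config.json \
#       --checkpoint_path ./s3prl_checkpoint.ckpt \
#       --model_dump_path ./converted_model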
| 351 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'],
'configuration_maskformer_swin': ['MaskFormerSwinConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_maskformer"] = ["MaskFormerFeatureExtractor"]
    _import_structure["image_processing_maskformer"] = ["MaskFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_maskformer"] = [
'MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'MaskFormerForInstanceSegmentation',
'MaskFormerModel',
'MaskFormerPreTrainedModel',
]
    _import_structure["modeling_maskformer_swin"] = [
'MaskFormerSwinBackbone',
'MaskFormerSwinModel',
'MaskFormerSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
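# Illustrative note (assumed usage, not executed here): replacing the module in
# sys.modules with a _LazyModule defers the heavy torch/vision imports above
# until an attribute is first accessed, e.g.:
#   from transformers.models.maskformer import MaskFormerForInstanceSegmentation
# only triggers the real import of modeling_maskformer at that point.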
| 106 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )

        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
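# Illustrative export sketch (assumed usage; the tokenizer variable and task
# choice are assumptions, not part of this module):
#   onnx_config = BlenderbotSmallOnnxConfig(BlenderbotSmallConfig(), task="default")
#   dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)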
| 12 |
import os


def solution(filename: str = "input.txt") -> int:
    """
    Returns the minimal three-way (up, down, right) path sum from the left
    column to the right column of the matrix read from `filename`.
    """
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")]
            for line in input_file.readlines()
        ]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, cols):
        # first assume we only moved right into column j
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]

        # then relax with downward moves...
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )

        # ...and upward moves
        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)


if __name__ == "__main__":
    print(f"{solution() = }")
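# Worked reference (value taken from the Project Euler problem 82 statement,
# not computed here): on its 5x5 example matrix the minimal left-to-right path
# sum under these movement rules is 994.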
| 205 | 0 |
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('repo_id' , ['canonical_dataset_name', 'org-name/dataset-name'] )
@pytest.mark.parametrize('path' , ['filename.csv', 'filename with blanks.csv'] )
@pytest.mark.parametrize('revision' , [None, 'v2'] )
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}" | 355 |
import base64


def ascii85_encode(string: str) -> bytes:
    return base64.a85encode(string.encode("utf-8"))


def ascii85_decode(a85encoded: bytes) -> str:
    return base64.a85decode(a85encoded).decode("utf-8")


if __name__ == "__main__":
    import doctest

    doctest.testmod() | 130 | 0 |
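# Round-trip sketch (assumed usage; Ascii85 maps 4 input bytes to 5 printable
# characters):
#   encoded = ascii85_encode("some text")   # -> bytes
#   assert ascii85_decode(encoded) == "some text"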
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/mbart-large-50-one-to-many-mmt""": (
"""https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/mbart-large-50-one-to-many-mmt""": 10_24,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
class MBart50Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        src_lang=None,
        tgt_lang=None,
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (string) to a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Retrieve sequence ids from a token list that has no special tokens added."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model inputs by adding the language-code prefix and eos suffix."""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source lang setting: prefix=[src_lang_code], suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[src_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target language setting: prefix=[tgt_lang_code], suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
| 59 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" ,"False" ) ) is not True ,reason="Skipping test because should only be run when releasing minor transformers version" ,)
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1_6_0_0, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1_6_0_0, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }

        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 17 | 0 |
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )


@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]


def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]


def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 360 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
"Acehnese Arabic": "ace_Arab",
"Acehnese Latin": "ace_Latn",
"Mesopotamian Arabic": "acm_Arab",
"Ta'izzi-Adeni Arabic": "acq_Arab",
"Tunisian Arabic": "aeb_Arab",
"Afrikaans": "afr_Latn",
"South Levantine Arabic": "ajp_Arab",
"Akan": "aka_Latn",
"Amharic": "amh_Ethi",
"North Levantine Arabic": "apc_Arab",
"Modern Standard Arabic": "arb_Arab",
"Modern Standard Arabic Romanized": "arb_Latn",
"Najdi Arabic": "ars_Arab",
"Moroccan Arabic": "ary_Arab",
"Egyptian Arabic": "arz_Arab",
"Assamese": "asm_Beng",
"Asturian": "ast_Latn",
"Awadhi": "awa_Deva",
"Central Aymara": "ayr_Latn",
"South Azerbaijani": "azb_Arab",
"North Azerbaijani": "azj_Latn",
"Bashkir": "bak_Cyrl",
"Bambara": "bam_Latn",
"Balinese": "ban_Latn",
"Belarusian": "bel_Cyrl",
"Bemba": "bem_Latn",
"Bengali": "ben_Beng",
"Bhojpuri": "bho_Deva",
"Banjar Arabic": "bjn_Arab",
"Banjar Latin": "bjn_Latn",
"Standard Tibetan": "bod_Tibt",
"Bosnian": "bos_Latn",
"Buginese": "bug_Latn",
"Bulgarian": "bul_Cyrl",
"Catalan": "cat_Latn",
"Cebuano": "ceb_Latn",
"Czech": "ces_Latn",
"Chokwe": "cjk_Latn",
"Central Kurdish": "ckb_Arab",
"Crimean Tatar": "crh_Latn",
"Welsh": "cym_Latn",
"Danish": "dan_Latn",
"German": "deu_Latn",
"Southwestern Dinka": "dik_Latn",
"Dyula": "dyu_Latn",
"Dzongkha": "dzo_Tibt",
"Greek": "ell_Grek",
"English": "eng_Latn",
"Esperanto": "epo_Latn",
"Estonian": "est_Latn",
"Basque": "eus_Latn",
"Ewe": "ewe_Latn",
"Faroese": "fao_Latn",
"Fijian": "fij_Latn",
"Finnish": "fin_Latn",
"Fon": "fon_Latn",
"French": "fra_Latn",
"Friulian": "fur_Latn",
"Nigerian Fulfulde": "fuv_Latn",
"Scottish Gaelic": "gla_Latn",
"Irish": "gle_Latn",
"Galician": "glg_Latn",
"Guarani": "grn_Latn",
"Gujarati": "guj_Gujr",
"Haitian Creole": "hat_Latn",
"Hausa": "hau_Latn",
"Hebrew": "heb_Hebr",
"Hindi": "hin_Deva",
"Chhattisgarhi": "hne_Deva",
"Croatian": "hrv_Latn",
"Hungarian": "hun_Latn",
"Armenian": "hye_Armn",
"Igbo": "ibo_Latn",
"Ilocano": "ilo_Latn",
"Indonesian": "ind_Latn",
"Icelandic": "isl_Latn",
"Italian": "ita_Latn",
"Javanese": "jav_Latn",
"Japanese": "jpn_Jpan",
"Kabyle": "kab_Latn",
"Jingpho": "kac_Latn",
"Kamba": "kam_Latn",
"Kannada": "kan_Knda",
"Kashmiri Arabic": "kas_Arab",
"Kashmiri Devanagari": "kas_Deva",
"Georgian": "kat_Geor",
"Central Kanuri Arabic": "knc_Arab",
"Central Kanuri Latin": "knc_Latn",
"Kazakh": "kaz_Cyrl",
"Kabiyè": "kbp_Latn",
"Kabuverdianu": "kea_Latn",
"Khmer": "khm_Khmr",
"Kikuyu": "kik_Latn",
"Kinyarwanda": "kin_Latn",
"Kyrgyz": "kir_Cyrl",
"Kimbundu": "kmb_Latn",
"Northern Kurdish": "kmr_Latn",
"Kikongo": "kon_Latn",
"Korean": "kor_Hang",
"Lao": "lao_Laoo",
"Ligurian": "lij_Latn",
"Limburgish": "lim_Latn",
"Lingala": "lin_Latn",
"Lithuanian": "lit_Latn",
"Lombard": "lmo_Latn",
"Latgalian": "ltg_Latn",
"Luxembourgish": "ltz_Latn",
"Luba-Kasai": "lua_Latn",
"Ganda": "lug_Latn",
"Luo": "luo_Latn",
"Mizo": "lus_Latn",
"Standard Latvian": "lvs_Latn",
"Magahi": "mag_Deva",
"Maithili": "mai_Deva",
"Malayalam": "mal_Mlym",
"Marathi": "mar_Deva",
"Minangkabau Arabic ": "min_Arab",
"Minangkabau Latin": "min_Latn",
"Macedonian": "mkd_Cyrl",
"Plateau Malagasy": "plt_Latn",
"Maltese": "mlt_Latn",
"Meitei Bengali": "mni_Beng",
"Halh Mongolian": "khk_Cyrl",
"Mossi": "mos_Latn",
"Maori": "mri_Latn",
"Burmese": "mya_Mymr",
"Dutch": "nld_Latn",
"Norwegian Nynorsk": "nno_Latn",
"Norwegian Bokmål": "nob_Latn",
"Nepali": "npi_Deva",
"Northern Sotho": "nso_Latn",
"Nuer": "nus_Latn",
"Nyanja": "nya_Latn",
"Occitan": "oci_Latn",
"West Central Oromo": "gaz_Latn",
"Odia": "ory_Orya",
"Pangasinan": "pag_Latn",
"Eastern Panjabi": "pan_Guru",
"Papiamento": "pap_Latn",
"Western Persian": "pes_Arab",
"Polish": "pol_Latn",
"Portuguese": "por_Latn",
"Dari": "prs_Arab",
"Southern Pashto": "pbt_Arab",
"Ayacucho Quechua": "quy_Latn",
"Romanian": "ron_Latn",
"Rundi": "run_Latn",
"Russian": "rus_Cyrl",
"Sango": "sag_Latn",
"Sanskrit": "san_Deva",
"Santali": "sat_Olck",
"Sicilian": "scn_Latn",
"Shan": "shn_Mymr",
"Sinhala": "sin_Sinh",
"Slovak": "slk_Latn",
"Slovenian": "slv_Latn",
"Samoan": "smo_Latn",
"Shona": "sna_Latn",
"Sindhi": "snd_Arab",
"Somali": "som_Latn",
"Southern Sotho": "sot_Latn",
"Spanish": "spa_Latn",
"Tosk Albanian": "als_Latn",
"Sardinian": "srd_Latn",
"Serbian": "srp_Cyrl",
"Swati": "ssw_Latn",
"Sundanese": "sun_Latn",
"Swedish": "swe_Latn",
"Swahili": "swh_Latn",
"Silesian": "szl_Latn",
"Tamil": "tam_Taml",
"Tatar": "tat_Cyrl",
"Telugu": "tel_Telu",
"Tajik": "tgk_Cyrl",
"Tagalog": "tgl_Latn",
"Thai": "tha_Thai",
"Tigrinya": "tir_Ethi",
"Tamasheq Latin": "taq_Latn",
"Tamasheq Tifinagh": "taq_Tfng",
"Tok Pisin": "tpi_Latn",
"Tswana": "tsn_Latn",
"Tsonga": "tso_Latn",
"Turkmen": "tuk_Latn",
"Tumbuka": "tum_Latn",
"Turkish": "tur_Latn",
"Twi": "twi_Latn",
"Central Atlas Tamazight": "tzm_Tfng",
"Uyghur": "uig_Arab",
"Ukrainian": "ukr_Cyrl",
"Umbundu": "umb_Latn",
"Urdu": "urd_Arab",
"Northern Uzbek": "uzn_Latn",
"Venetian": "vec_Latn",
"Vietnamese": "vie_Latn",
"Waray": "war_Latn",
"Wolof": "wol_Latn",
"Xhosa": "xho_Latn",
"Eastern Yiddish": "ydd_Hebr",
"Yoruba": "yor_Latn",
"Yue Chinese": "yue_Hant",
"Chinese Simplified": "zho_Hans",
"Chinese Traditional": "zho_Hant",
"Standard Malay": "zsm_Latn",
"Zulu": "zul_Latn",
}
class a__ ( UpperCAmelCase__ ):
lowerCamelCase : Any ="facebook/nllb-200-distilled-600M"
lowerCamelCase : Optional[Any] =(
"This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
"be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
"which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in "
"plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
)
lowerCamelCase : Tuple ="translator"
lowerCamelCase : Any =AutoTokenizer
lowerCamelCase : Dict =AutoModelForSeqaSeqLM
lowerCamelCase : Union[str, Any] =LANGUAGE_CODES
lowerCamelCase : Any =["text", "text", "text"]
lowerCamelCase : Union[str, Any] =["text"]
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , a : Any , a : List[str] , a : Any ):
"""simple docstring"""
if src_lang not in self.lang_to_code:
raise ValueError(f"""{src_lang} is not a supported language.""" )
if tgt_lang not in self.lang_to_code:
raise ValueError(f"""{tgt_lang} is not a supported language.""" )
__lowerCamelCase = self.lang_to_code[src_lang]
__lowerCamelCase = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
a , return_tensors='''pt''' , src_lang=a , tgt_lang=a )
def SCREAMING_SNAKE_CASE__ ( self : str , a : Optional[Any] ):
"""simple docstring"""
return self.model.generate(**a )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , a : str ):
"""simple docstring"""
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=a )
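# Hedged usage sketch (assuming the standard PipelineTool call protocol,
# i.e. __call__ chains the setup/encode/forward/decode methods defined
# above, and that language names must be plain-English keys of
# LANGUAGE_CODES):
#
#     tool = a__()  # the translator tool class defined above
#     print(tool("Bonjour le monde", src_lang="French", tgt_lang="English"))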
| 237 | 0 |
"""simple docstring"""
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace()
__SCREAMING_SNAKE_CASE =np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
__SCREAMING_SNAKE_CASE =[0, 25, 50]
__SCREAMING_SNAKE_CASE =[25, 50, 75]
__SCREAMING_SNAKE_CASE =fuzz.membership.trimf(X, abca)
__SCREAMING_SNAKE_CASE =fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
__SCREAMING_SNAKE_CASE =np.ones(75)
__SCREAMING_SNAKE_CASE =np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
__SCREAMING_SNAKE_CASE =fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
__SCREAMING_SNAKE_CASE =fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1 - µA(x))
__SCREAMING_SNAKE_CASE =fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
__SCREAMING_SNAKE_CASE =fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
__SCREAMING_SNAKE_CASE =young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
__SCREAMING_SNAKE_CASE =young * middle_aged
# 7. Bounded Sum = min[1, (µA(x) + µB(x))]
__SCREAMING_SNAKE_CASE =fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = max[0, (µA(x) - µB(x))]
__SCREAMING_SNAKE_CASE =fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title("Young")
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title("Middle aged")
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title("union")
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title("intersection")
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title("complement_a")
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title("difference a/b")
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title("alg_sum")
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title("alg_product")
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title("bdd_sum")
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title("bdd_difference")
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
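# Hedged sanity note: on a shared universe X, fuzz.fuzzy_or / fuzz.fuzzy_and
# are expected to reduce to elementwise max / min, so a check such as
# np.allclose(union, np.maximum(young, middle_aged)) should hold here.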
| 213 | """simple docstring"""
__SCREAMING_SNAKE_CASE ={}
def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ):
# if we are absent twice, or late 3 consecutive days,
# no further prize strings are possible
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
lowercase_ : Any = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
lowercase_ : Optional[int] = _calculate(days - 1 , __SCREAMING_SNAKE_CASE , late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
lowercase_ : Any = _calculate(days - 1 , absent + 1 , 0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
lowercase_ : Dict = _calculate(days - 1 , __SCREAMING_SNAKE_CASE , 0 )
lowercase_ : str = state_late + state_absent + state_ontime
lowercase_ : Tuple = prizestrings
return prizestrings
def lowercase__( __SCREAMING_SNAKE_CASE : int = 30 ):
return _calculate(__SCREAMING_SNAKE_CASE , absent=0 , late=0 )
if __name__ == "__main__":
print(solution())
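# Hedged self-contained sketch of the same recurrence with readable names
# (the helper name is illustrative, not from the original); the 4-day case
# should come out to 43, Project Euler 191's worked example.
def _prize_strings(days: int, absent: int = 0, late: int = 0) -> int:
    if late == 3 or absent == 2:  # a rule is violated: dead branch
        return 0
    if days == 0:  # survived every day: one valid prize string
        return 1
    return (
        _prize_strings(days - 1, absent, late + 1)  # late today
        + _prize_strings(days - 1, absent + 1, 0)   # absent today
        + _prize_strings(days - 1, absent, 0)       # on time today
    )

assert _prize_strings(4) == 43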
| 213 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
lowerCAmelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCAmelCase__ = '''
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
>>> pipe.to("cuda")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save("cat.png")
```
'''
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=8 ):
"""simple docstring"""
UpperCamelCase = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
UpperCamelCase = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
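# Hedged numeric example for the helper above: with the default
# scale_factor of 8, a requested 512x512 canvas maps to 512 // 64 == 8 and
# back out to 8 * 8 == 64, i.e. (64, 64); a non-multiple like 500 is first
# rounded up (500 // 64 == 7 with a remainder, so 8) and lands on 64 too.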
class _lowerCamelCase ( _lowercase ):
def __init__(self , __a , __a , __a , ) -> Any:
super().__init__()
self.register_modules(
unet=__a , scheduler=__a , movq=__a , )
UpperCamelCase = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def snake_case_ (self , __a , __a , __a , __a , __a , __a ) -> Optional[int]:
if latents is None:
UpperCamelCase = randn_tensor(__a , generator=__a , device=__a , dtype=__a )
else:
if latents.shape != shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {shape}" )
UpperCamelCase = latents.to(__a )
UpperCamelCase = latents * scheduler.init_noise_sigma
return latents
def snake_case_ (self , __a=0 ) -> str:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
UpperCamelCase = torch.device(F"cuda:{gpu_id}" )
UpperCamelCase = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__a , __a )
def snake_case_ (self , __a=0 ) -> Union[str, Any]:
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
UpperCamelCase = torch.device(F"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=__a )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
UpperCamelCase = None
for cpu_offloaded_model in [self.unet, self.movq]:
UpperCamelCase , UpperCamelCase = cpu_offload_with_hook(__a , __a , prev_module_hook=__a )
# We'll offload the last model manually.
UpperCamelCase = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def snake_case_ (self ) -> str:
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(__a , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(__a )
def __call__(self , __a , __a , __a = 5_12 , __a = 5_12 , __a = 1_00 , __a = 4.0 , __a = 1 , __a = None , __a = None , __a = "pil" , __a = True , ) -> List[Any]:
UpperCamelCase = self._execution_device
UpperCamelCase = guidance_scale > 1.0
if isinstance(__a , __a ):
UpperCamelCase = torch.cat(__a , dim=0 )
UpperCamelCase = image_embeds.shape[0] * num_images_per_prompt
if isinstance(__a , __a ):
UpperCamelCase = torch.cat(__a , dim=0 )
if do_classifier_free_guidance:
UpperCamelCase = image_embeds.repeat_interleave(__a , dim=0 )
UpperCamelCase = negative_image_embeds.repeat_interleave(__a , dim=0 )
UpperCamelCase = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=__a )
self.scheduler.set_timesteps(__a , device=__a )
UpperCamelCase = self.scheduler.timesteps
UpperCamelCase = self.unet.config.in_channels
UpperCamelCase , UpperCamelCase = downscale_height_and_width(__a , __a , self.movq_scale_factor )
# create initial latent
UpperCamelCase = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , __a , __a , __a , self.scheduler , )
for i, t in enumerate(self.progress_bar(__a ) ):
# expand the latents if we are doing classifier free guidance
UpperCamelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCamelCase = {"image_embeds": image_embeds}
UpperCamelCase = self.unet(
sample=__a , timestep=__a , encoder_hidden_states=__a , added_cond_kwargs=__a , return_dict=__a , )[0]
if do_classifier_free_guidance:
UpperCamelCase , UpperCamelCase = noise_pred.split(latents.shape[1] , dim=1 )
UpperCamelCase , UpperCamelCase = noise_pred.chunk(2 )
UpperCamelCase , UpperCamelCase = variance_pred.chunk(2 )
UpperCamelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCamelCase = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCamelCase , UpperCamelCase = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase = self.scheduler.step(
__a , __a , __a , generator=__a , )[0]
# post-processing
UpperCamelCase = self.movq.decode(__a , force_not_quantize=__a )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
UpperCamelCase = image * 0.5 + 0.5
UpperCamelCase = image.clamp(0 , 1 )
UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCamelCase = self.numpy_to_pil(__a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__a )
| 244 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''tiiuae/falcon-40b''': '''https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json''',
'''tiiuae/falcon-7b''': '''https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json''',
}
class _lowerCamelCase ( _lowercase ):
UpperCAmelCase_ = "falcon"
UpperCAmelCase_ = ["past_key_values"]
def __init__(self , __a=6_50_24 , __a=45_44 , __a=32 , __a=71 , __a=1e-5 , __a=0.02 , __a=True , __a=0.0 , __a=0.0 , __a=None , __a=False , __a=False , __a=True , __a=True , __a=False , __a=11 , __a=11 , **__a , ) -> Union[str, Any]:
UpperCamelCase = vocab_size
# Backward compatibility with n_embed kwarg
UpperCamelCase = kwargs.pop("n_embed" , __a )
UpperCamelCase = hidden_size if n_embed is None else n_embed
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = layer_norm_epsilon
UpperCamelCase = initializer_range
UpperCamelCase = use_cache
UpperCamelCase = hidden_dropout
UpperCamelCase = attention_dropout
UpperCamelCase = bos_token_id
UpperCamelCase = eos_token_id
UpperCamelCase = num_attention_heads if num_kv_heads is None else num_kv_heads
UpperCamelCase = alibi
UpperCamelCase = new_decoder_architecture
UpperCamelCase = multi_query # Ignored when new_decoder_architecture is True
UpperCamelCase = parallel_attn
UpperCamelCase = bias
super().__init__(bos_token_id=__a , eos_token_id=__a , **__a )
@property
def snake_case_ (self ) -> Optional[int]:
return self.hidden_size // self.num_attention_heads
@property
def snake_case_ (self ) -> Dict:
return not self.alibi
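# Hedged usage note: with the defaults above, the first derived property is
# hidden_size // num_attention_heads == 4544 // 71 == 64 (the per-head
# dimension), and the second reports rotary position embeddings whenever
# `alibi` is False.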
| 244 | 1 |
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def _SCREAMING_SNAKE_CASE () -> int:
"""simple docstring"""
lowercase__ = '''https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg'''
lowercase__ = Image.open(requests.get(A , stream=A ).raw ).convert('''RGB''' )
return image
def _SCREAMING_SNAKE_CASE (A ) -> Any:
"""simple docstring"""
lowercase__ = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias") )
rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias") )
rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight",) )
rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias") )
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias") )
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias") )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.embeddings.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.embeddings.layernorm.bias''') )
# fmt: on
return rename_keys
def _SCREAMING_SNAKE_CASE (A , A , A ) -> Tuple:
"""simple docstring"""
lowercase__ = dct.pop(A )
lowercase__ = val
def _SCREAMING_SNAKE_CASE (A , A ) -> Union[str, Any]:
"""simple docstring"""
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
lowercase__ = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias" )
lowercase__ = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias" )
# next, set bias in the state dict
lowercase__ = torch.cat((q_bias, torch.zeros_like(A , requires_grad=A ), v_bias) )
lowercase__ = qkv_bias
def _SCREAMING_SNAKE_CASE (A ) -> List[Any]:
"""simple docstring"""
lowercase__ = 364 if '''coco''' in model_name else 224
lowercase__ = InstructBlipVisionConfig(image_size=A ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "t5-xl" in model_name:
lowercase__ = TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
lowercase__ = TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
elif "vicuna-7b" in model_name:
lowercase__ = LlamaConfig.from_pretrained('''decapoda-research/llama-7b-hf''' , vocab_size=32_001 ).to_dict()
elif "vicuna-13b" in model_name:
lowercase__ = LlamaConfig.from_pretrained('''decapoda-research/llama-13b-hf''' , vocab_size=32_001 ).to_dict()
else:
raise ValueError('''Model name not supported''' )
# the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
lowercase__ = InstructBlipQFormerConfig(vocab_size=30_523 ).to_dict()
lowercase__ = InstructBlipConfig(vision_config=A , text_config=A , qformer_config=A )
return config, image_size
@torch.no_grad()
def _SCREAMING_SNAKE_CASE (A , A=None , A=False ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = AutoTokenizer.from_pretrained('''bert-base-uncased''' , truncation_side='''left''' )
qformer_tokenizer.add_special_tokens({'''bos_token''': '''[DEC]'''} )
if "t5" in model_name:
lowercase__ = TaTokenizerFast.from_pretrained('''google/flan-t5-xl''' , truncation_side='''left''' )
elif "vicuna" in model_name:
# the following was used in the original implementation:
# tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
# tokenizer.add_special_tokens({"pad_token": "[PAD]"})
# tokenizer.add_special_tokens({"bos_token": "</s>"})
# tokenizer.add_special_tokens({"eos_token": "</s>"})
# tokenizer.add_special_tokens({"unk_token": "</s>"})
lowercase__ = LlamaTokenizerFast.from_pretrained(
'''huggyllama/llama-7b''' , truncation_side='''left''' , bos_token='''</s>''' , unk_token='''</s>''' )
tokenizer.add_special_tokens({'''pad_token''': '''[PAD]'''} )
lowercase__ ,lowercase__ = get_blipa_config(A )
lowercase__ = InstructBlipForConditionalGeneration(A ).eval()
lowercase__ = {
'''instructblip-vicuna-7b''': ('''blip2_vicuna_instruct''', '''vicuna7b'''),
'''instructblip-vicuna-13b''': ('''blip2_vicuna_instruct''', '''vicuna13b'''),
'''instructblip-flan-t5-xl''': ('''blip2_t5_instruct''', '''flant5xl'''),
'''instructblip-flan-t5-xxl''': ('''blip2_t5_instruct''', '''flant5xxl'''),
}
lowercase__ ,lowercase__ = model_name_to_original[model_name]
# load original model
print('''Loading original model...''' )
lowercase__ = '''cuda:1''' if torch.cuda.is_available() else '''cpu'''
lowercase__ = '''cuda:2''' if torch.cuda.is_available() else '''cpu'''
lowercase__ ,lowercase__ ,lowercase__ = load_model_and_preprocess(
name=A , model_type=A , is_eval=A , device=A )
original_model.eval()
print('''Done!''' )
# update state dict keys
lowercase__ = original_model.state_dict()
lowercase__ = create_rename_keys(A )
for src, dest in rename_keys:
rename_key(A , A , A )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
lowercase__ = state_dict.pop(A )
if key.startswith('''Qformer.bert''' ):
lowercase__ = key.replace('''Qformer.bert''' , '''qformer''' )
if "attention.self" in key:
lowercase__ = key.replace('''self''' , '''attention''' )
if "llm_proj" in key:
lowercase__ = key.replace('''llm_proj''' , '''language_projection''' )
if "t5_proj" in key:
lowercase__ = key.replace('''t5_proj''' , '''language_projection''' )
if key.startswith('''llm_model''' ):
lowercase__ = key.replace('''llm_model''' , '''language_model''' )
if key.startswith('''t5''' ):
lowercase__ = key.replace('''t5''' , '''language''' )
lowercase__ = val
# read in qv biases
read_in_q_v_bias(A , A )
# note: weights get loaded in torch.float32 by default
hf_model.load_state_dict(A , strict=A )
lowercase__ = load_demo_image()
lowercase__ = '''What is unusual about this image?'''
# create processor
lowercase__ = BlipImageProcessor(
size={'''height''': image_size, '''width''': image_size} , image_mean=A , image_std=A )
lowercase__ = InstructBlipProcessor(
image_processor=A , tokenizer=A , qformer_tokenizer=A , )
lowercase__ = processor(images=A , text=A , return_tensors='''pt''' ).to(A )
# make sure processor creates exact same pixel values
lowercase__ = vis_processors['''eval'''](A ).unsqueeze(0 ).to(A )
lowercase__ = inputs.pixel_values
assert torch.allclose(original_pixel_values.to(pixel_values.device ) , A )
original_model.to(A )
hf_model.to(A )
with torch.no_grad():
if "vicuna" in model_name:
lowercase__ = original_model({'''image''': original_pixel_values, '''text_input''': [prompt]} ).logits
lowercase__ = hf_model(**A ).logits
else:
lowercase__ = original_model(
{'''image''': original_pixel_values, '''text_input''': [prompt], '''text_output''': ['''\n''']} ).logits
lowercase__ = tokenizer('''\n''' , return_tensors='''pt''' ).input_ids.to(A )
lowercase__ = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100 )
lowercase__ = hf_model(**A , labels=A ).logits
print('''First values of original logits:''' , original_logits[0, :3, :3] )
print('''First values of HF logits:''' , logits[0, :3, :3] )
# assert values
assert original_logits.shape == logits.shape
lowercase__ = 1E-4 if '''vicuna''' in model_name else 1E-5
assert torch.allclose(original_logits.to(logits.device ) , A , atol=A )
print('''Looks ok!''' )
print('''Generating with original model...''' )
lowercase__ = original_model.generate({'''image''': original_pixel_values, '''prompt''': prompt} , num_beams=5 )
# important: we need to cast the weights of the HF model to the appropriate type
print('''Generating with HF model...''' )
lowercase__ = hf_model.generate(
**A , do_sample=A , num_beams=5 , max_length=256 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , )
if "vicuna" in model_name:
# convert output id 0 to 2 (eos_token_id)
# TODO add this in the generate method?
lowercase__ = 2
print('''Original generation:''' , A )
lowercase__ = processor.batch_decode(A , skip_special_tokens=A )
lowercase__ = [text.strip() for text in output_text]
print('''HF generation:''' , A )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(A )
hf_model.save_pretrained(A )
if push_to_hub:
processor.push_to_hub(f"Salesforce/{model_name}" )
hf_model.push_to_hub(f"Salesforce/{model_name}" )
if __name__ == "__main__":
lowerCamelCase : List[Any] = argparse.ArgumentParser()
lowerCamelCase : Dict = [
'instructblip-vicuna-7b',
'instructblip-vicuna-13b',
'instructblip-flan-t5-xl',
'instructblip-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='instructblip-flan-t5-xl',
choices=choices,
type=str,
        help='Name of the InstructBLIP model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
lowerCamelCase : Union[str, Any] = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
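# Hedged CLI sketch (the script filename is an assumption; the flags come
# from the argparse block above):
#
#     python convert_instructblip_original_to_pytorch.py \
#         --model_name instructblip-flan-t5-xl \
#         --pytorch_dump_folder_path ./instructblip-flan-t5-xl \
#         --push_to_hub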
| 2 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__A = logging.get_logger(__name__)
__A = {
"SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class snake_case ( __snake_case ):
SCREAMING_SNAKE_CASE_ : str = """deformable_detr"""
SCREAMING_SNAKE_CASE_ : int = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self : int , UpperCamelCase__ : int=True , UpperCamelCase__ : str=None , UpperCamelCase__ : int=3 , UpperCamelCase__ : Dict=3_0_0 , UpperCamelCase__ : Optional[int]=1_0_2_4 , UpperCamelCase__ : int=6 , UpperCamelCase__ : List[Any]=1_0_2_4 , UpperCamelCase__ : List[Any]=8 , UpperCamelCase__ : str=6 , UpperCamelCase__ : str=1_0_2_4 , UpperCamelCase__ : Union[str, Any]=8 , UpperCamelCase__ : Dict=0.0 , UpperCamelCase__ : str=True , UpperCamelCase__ : List[Any]="relu" , UpperCamelCase__ : Tuple=2_5_6 , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : Optional[int]=0.0 , UpperCamelCase__ : Dict=0.0 , UpperCamelCase__ : Optional[Any]=0.02 , UpperCamelCase__ : Dict=1.0 , UpperCamelCase__ : Dict=True , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : List[str]="sine" , UpperCamelCase__ : Any="resnet50" , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Any=False , UpperCamelCase__ : Optional[int]=4 , UpperCamelCase__ : List[str]=4 , UpperCamelCase__ : Tuple=4 , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : Optional[int]=3_0_0 , UpperCamelCase__ : int=False , UpperCamelCase__ : List[Any]=1 , UpperCamelCase__ : Optional[Any]=5 , UpperCamelCase__ : List[Any]=2 , UpperCamelCase__ : str=1 , UpperCamelCase__ : int=1 , UpperCamelCase__ : Dict=5 , UpperCamelCase__ : Tuple=2 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : Union[str, Any]=0.25 , UpperCamelCase__ : List[Any]=False , **UpperCamelCase__ : Dict , )-> Optional[int]:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
__lowerCAmelCase: List[Any] = CONFIG_MAPPING["resnet"](out_features=["stage4"])
elif isinstance(UpperCamelCase__ , UpperCamelCase__):
__lowerCAmelCase: int = backbone_config.get("model_type")
__lowerCAmelCase: List[str] = CONFIG_MAPPING[backbone_model_type]
__lowerCAmelCase: Any = config_class.from_dict(UpperCamelCase__)
__lowerCAmelCase: int = use_timm_backbone
__lowerCAmelCase: Any = backbone_config
__lowerCAmelCase: Tuple = num_channels
__lowerCAmelCase: str = num_queries
__lowerCAmelCase: List[str] = max_position_embeddings
__lowerCAmelCase: List[Any] = d_model
__lowerCAmelCase: Union[str, Any] = encoder_ffn_dim
__lowerCAmelCase: Tuple = encoder_layers
__lowerCAmelCase: List[str] = encoder_attention_heads
__lowerCAmelCase: Any = decoder_ffn_dim
__lowerCAmelCase: Union[str, Any] = decoder_layers
__lowerCAmelCase: List[Any] = decoder_attention_heads
__lowerCAmelCase: List[Any] = dropout
__lowerCAmelCase: Optional[Any] = attention_dropout
__lowerCAmelCase: Union[str, Any] = activation_dropout
__lowerCAmelCase: Union[str, Any] = activation_function
__lowerCAmelCase: Dict = init_std
__lowerCAmelCase: int = init_xavier_std
__lowerCAmelCase: str = encoder_layerdrop
__lowerCAmelCase: Union[str, Any] = auxiliary_loss
__lowerCAmelCase: List[Any] = position_embedding_type
__lowerCAmelCase: str = backbone
__lowerCAmelCase: Tuple = use_pretrained_backbone
__lowerCAmelCase: int = dilation
# deformable attributes
__lowerCAmelCase: Union[str, Any] = num_feature_levels
__lowerCAmelCase: Optional[Any] = encoder_n_points
__lowerCAmelCase: Dict = decoder_n_points
__lowerCAmelCase: Optional[Any] = two_stage
__lowerCAmelCase: Tuple = two_stage_num_proposals
__lowerCAmelCase: int = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError("If two_stage is True, with_box_refine must be True.")
# Hungarian matcher
__lowerCAmelCase: str = class_cost
__lowerCAmelCase: List[str] = bbox_cost
__lowerCAmelCase: List[str] = giou_cost
# Loss coefficients
__lowerCAmelCase: Tuple = mask_loss_coefficient
__lowerCAmelCase: int = dice_loss_coefficient
__lowerCAmelCase: Any = bbox_loss_coefficient
__lowerCAmelCase: str = giou_loss_coefficient
__lowerCAmelCase: int = eos_coefficient
__lowerCAmelCase: Tuple = focal_alpha
__lowerCAmelCase: Optional[Any] = disable_custom_kernels
super().__init__(is_encoder_decoder=UpperCamelCase__ , **UpperCamelCase__)
@property
def lowercase_ ( self : List[Any])-> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def lowercase_ ( self : Optional[Any])-> int:
'''simple docstring'''
return self.d_model
def lowercase_ ( self : Union[str, Any])-> List[str]:
'''simple docstring'''
__lowerCAmelCase: Tuple = copy.deepcopy(self.__dict__)
if self.backbone_config is not None:
__lowerCAmelCase: str = self.backbone_config.to_dict()
__lowerCAmelCase: Tuple = self.__class__.model_type
return output
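# Hedged serialization note: the to_dict() method above folds any nested
# backbone config back into a plain dict and stamps in the model_type, so
# the result is JSON-serializable and can round-trip through the usual
# from_dict / from_pretrained machinery of PretrainedConfig.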
| 217 | 0 |
from __future__ import annotations
def A__ ( lowerCamelCase , lowerCamelCase ) -> List[Any]:
print(F'''Vertex\tShortest Distance from vertex {src}''' )
for i, d in enumerate(lowerCamelCase ):
print(F'''{i}\t\t{d}''' )
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> List[Any]:
for j in range(lowerCamelCase ):
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: Union[str, Any] = (graph[j][k] for k in ["""src""", """dst""", """weight"""])
if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]:
return True
return False
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> list[float]:
UpperCamelCase_: List[Any] = [float("""inf""" )] * vertex_count
UpperCamelCase_: Optional[int] = 0.0
for _ in range(vertex_count - 1 ):
for j in range(lowerCamelCase ):
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: Optional[int] = (graph[j][k] for k in ["""src""", """dst""", """weight"""])
if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]:
UpperCamelCase_: List[str] = distance[u] + w
UpperCamelCase_: str = check_negative_cycle(lowerCamelCase , lowerCamelCase , lowerCamelCase )
if negative_cycle_exists:
raise Exception("""Negative cycle found""" )
return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase_ : List[str] = int(input("""Enter number of vertices: """).strip())
lowerCamelCase_ : List[str] = int(input("""Enter number of edges: """).strip())
lowerCamelCase_ : list[dict[str, int]] = [{} for _ in range(E)]
for i in range(E):
print("""Edge """, i + 1)
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Union[str, Any] = (
int(x)
for x in input("""Enter source, destination, weight: """).strip().split(""" """)
)
lowerCamelCase_ : Dict = {"""src""": src, """dst""": dest, """weight""": weight}
lowerCamelCase_ : str = int(input("""\nEnter shortest path source:""").strip())
lowerCamelCase_ : List[Any] = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
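# Hedged self-contained sketch of the same relaxation loop with readable
# names (helper name and demo graph are illustrative, not from the
# original), checked on a tiny 3-vertex graph with one negative edge but
# no negative cycle.
def _bellman_ford(edges: list[dict], vertex_count: int, source: int) -> list[float]:
    dist = [float("inf")] * vertex_count
    dist[source] = 0.0
    for _ in range(vertex_count - 1):  # relax every edge V-1 times
        for e in edges:
            if dist[e["src"]] != float("inf") and dist[e["src"]] + e["weight"] < dist[e["dst"]]:
                dist[e["dst"]] = dist[e["src"]] + e["weight"]
    for e in edges:  # one extra pass: any further improvement means a negative cycle
        if dist[e["src"]] != float("inf") and dist[e["src"]] + e["weight"] < dist[e["dst"]]:
            raise Exception("Negative cycle found")
    return dist

assert _bellman_ford(
    [{"src": 0, "dst": 1, "weight": 4}, {"src": 1, "dst": 2, "weight": -2}], 3, 0
) == [0.0, 4.0, 2.0]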
| 223 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _UpperCamelCase ( _A , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : Optional[Any] = """hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"""
def lowerCAmelCase__ ( self : List[str] , snake_case_ : Optional[int]=0 ):
UpperCamelCase_: str = floats_tensor((1, 3, 128, 128) , rng=random.Random(snake_case_ ) )
UpperCamelCase_: Optional[int] = np.random.RandomState(snake_case_ )
UpperCamelCase_: Dict = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""strength""": 0.75,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase__ ( self : Any ):
UpperCamelCase_: Any = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase_: str = self.get_dummy_inputs()
UpperCamelCase_: Any = pipe(**snake_case_ ).images
UpperCamelCase_: Any = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
UpperCamelCase_: str = np.array([0.6_9643, 0.5_8484, 0.5_0314, 0.5_8760, 0.5_5368, 0.5_9643, 0.5_1529, 0.4_1217, 0.4_9087] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: int = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
UpperCamelCase_: Tuple = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase_: str = self.get_dummy_inputs()
UpperCamelCase_: Dict = pipe(**snake_case_ ).images
UpperCamelCase_: Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCamelCase_: Any = np.array([0.6_1737, 0.5_4642, 0.5_3183, 0.5_4465, 0.5_2742, 0.6_0525, 0.4_9969, 0.4_0655, 0.4_8154] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCAmelCase__ ( self : str ):
UpperCamelCase_: str = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
UpperCamelCase_: List[Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=snake_case_ )
# warmup pass to apply optimizations
UpperCamelCase_: Union[str, Any] = pipe(**self.get_dummy_inputs() )
UpperCamelCase_: Tuple = self.get_dummy_inputs()
UpperCamelCase_: Optional[int] = pipe(**snake_case_ ).images
UpperCamelCase_: str = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCamelCase_: Optional[int] = np.array([0.5_2761, 0.5_9977, 0.4_9033, 0.4_9619, 0.5_4282, 0.5_0311, 0.4_7600, 0.4_0918, 0.4_5203] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: List[str] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
UpperCamelCase_: Optional[int] = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase_: Union[str, Any] = self.get_dummy_inputs()
UpperCamelCase_: List[Any] = pipe(**snake_case_ ).images
UpperCamelCase_: List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCamelCase_: List[str] = np.array([0.5_2911, 0.6_0004, 0.4_9229, 0.4_9805, 0.5_4502, 0.5_0680, 0.4_7777, 0.4_1028, 0.4_5304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: List[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
UpperCamelCase_: List[Any] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase_: List[str] = self.get_dummy_inputs()
UpperCamelCase_: Any = pipe(**snake_case_ ).images
UpperCamelCase_: Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCamelCase_: Tuple = np.array([0.5_2911, 0.6_0004, 0.4_9229, 0.4_9805, 0.5_4502, 0.5_0680, 0.4_7777, 0.4_1028, 0.4_5304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: Optional[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
UpperCamelCase_: str = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase_: List[Any] = self.get_dummy_inputs()
UpperCamelCase_: Optional[int] = pipe(**snake_case_ ).images
UpperCamelCase_: Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCamelCase_: Tuple = np.array([0.6_5331, 0.5_8277, 0.4_8204, 0.5_6059, 0.5_3665, 0.5_6235, 0.5_0969, 0.4_0009, 0.4_6552] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def lowerCAmelCase__ ( self : int ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCAmelCase__ ( self : List[Any] ):
UpperCamelCase_: Dict = ort.SessionOptions()
UpperCamelCase_: Optional[int] = False
return options
def lowerCAmelCase__ ( self : Tuple ):
UpperCamelCase_: Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
UpperCamelCase_: Union[str, Any] = init_image.resize((768, 512) )
# using the PNDM scheduler by default
UpperCamelCase_: int = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=snake_case_ , feature_extractor=snake_case_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase_: Dict = """A fantasy landscape, trending on artstation"""
UpperCamelCase_: List[str] = np.random.RandomState(0 )
UpperCamelCase_: List[str] = pipe(
prompt=snake_case_ , image=snake_case_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=snake_case_ , output_type="""np""" , )
UpperCamelCase_: List[str] = output.images
UpperCamelCase_: Optional[Any] = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
UpperCamelCase_: Tuple = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
UpperCamelCase_: List[Any] = init_image.resize((768, 512) )
UpperCamelCase_: Union[str, Any] = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
UpperCamelCase_: Dict = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=snake_case_ , safety_checker=snake_case_ , feature_extractor=snake_case_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase_: Any = """A fantasy landscape, trending on artstation"""
UpperCamelCase_: Dict = np.random.RandomState(0 )
UpperCamelCase_: Union[str, Any] = pipe(
prompt=snake_case_ , image=snake_case_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=snake_case_ , output_type="""np""" , )
UpperCamelCase_: Optional[int] = output.images
UpperCamelCase_: Dict = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
UpperCamelCase_: Optional[int] = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 223 | 1 |
"""simple docstring"""
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __UpperCamelCase ( a__ , a__ ):
@register_to_config
def __init__( self , *,
lowerCAmelCase__ = 4 , lowerCAmelCase__ = 768 , lowerCAmelCase__ , lowerCAmelCase__ , ) -> Optional[Any]:
super().__init__()
a : Tuple = nn.Parameter(torch.zeros(lowerCAmelCase__ ) )
# parameters for additional clip time embeddings
a : str = nn.Linear(lowerCAmelCase__ , lowerCAmelCase__ )
a : Any = nn.Linear(lowerCAmelCase__ , lowerCAmelCase__ )
# parameters for encoder hidden states
a : int = clip_extra_context_tokens
a : int = nn.Linear(
lowerCAmelCase__ , self.clip_extra_context_tokens * cross_attention_dim )
a : Any = nn.Linear(lowerCAmelCase__ , lowerCAmelCase__ )
a : str = nn.LayerNorm(lowerCAmelCase__ )
def __a ( self , *, lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Union[str, Any]:
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
a : str = image_embeddings.shape[0]
a : Optional[int] = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
a : Any = classifier_free_guidance_embeddings.expand(
lowerCAmelCase__ , -1 )
a : Any = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
a : List[str] = prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
a : Dict = self.embedding_proj(lowerCAmelCase__ )
a : List[str] = self.clip_image_embeddings_project_to_time_embeddings(lowerCAmelCase__ )
a : Dict = time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
a : Union[str, Any] = self.clip_extra_context_tokens_proj(lowerCAmelCase__ )
a : List[str] = clip_extra_context_tokens.reshape(lowerCAmelCase__ , -1 , self.clip_extra_context_tokens )
a : Optional[Any] = clip_extra_context_tokens.permute(0 , 2 , 1 )
a : Optional[int] = self.encoder_hidden_states_proj(lowerCAmelCase__ )
a : str = self.text_encoder_hidden_states_norm(lowerCAmelCase__ )
a : List[str] = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
return text_encoder_hidden_states, additive_clip_time_embeddings
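# Hedged data-flow note for the forward pass above: the image embedding is
# (a) projected and added to the projected prompt embedding to form an
# additive term for the timestep embedding, and (b) projected into
# clip_extra_context_tokens extra tokens that are concatenated in front of
# the projected, layer-normed text encoder hidden states.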
| 105 |
"""simple docstring"""
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
SCREAMING_SNAKE_CASE = "src/diffusers"
# Matches is_xxx_available()
SCREAMING_SNAKE_CASE = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
SCREAMING_SNAKE_CASE = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
SCREAMING_SNAKE_CASE = "\n{0} = None\n"
SCREAMING_SNAKE_CASE = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n"
SCREAMING_SNAKE_CASE = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
A__ = _re_backend.findall(lowercase_ )
if len(lowercase_ ) == 0:
return None
return "_and_".join(lowercase_ )
def _SCREAMING_SNAKE_CASE ( ) -> str:
with open(os.path.join(lowercase_ , "__init__.py" ) , "r" , encoding="utf-8" , newline="\n" ) as f:
A__ = f.readlines()
# Get to the point we do the actual imports for type checking
A__ = 0
A__ = {}
# Go through the end of the file
while line_index < len(lowercase_ ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
A__ = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith("else:" ):
line_index += 1
line_index += 1
A__ = []
# Until we unindent, add backend objects to the list
while line_index < len(lowercase_ ) and len(lines[line_index] ) > 1:
A__ = lines[line_index]
A__ = _re_single_line_import.search(lowercase_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(lowercase_ ) > 0:
A__ = objects
else:
line_index += 1
return backend_specific_objects
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Dict:
if name.isupper():
return DUMMY_CONSTANT.format(lowercase_ )
elif name.islower():
return DUMMY_FUNCTION.format(lowercase_ , lowercase_ )
else:
return DUMMY_CLASS.format(lowercase_ , lowercase_ )
def _SCREAMING_SNAKE_CASE ( lowercase_=None ) -> Union[str, Any]:
if backend_specific_objects is None:
A__ = read_init()
# For special correspondence backend to module name as used in the function requires_modulename
A__ = {}
for backend, objects in backend_specific_objects.items():
A__ = "[" + ", ".join(f"""\"{b}\"""" for b in backend.split("_and_" ) ) + "]"
A__ = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(lowercase_ , lowercase_ ) for o in objects] )
A__ = dummy_file
return dummy_files
def _SCREAMING_SNAKE_CASE ( lowercase_=False ) -> Union[str, Any]:
A__ = create_dummy_files()
# For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
A__ = {"torch": "pt"}
# Locate actual dummy modules and read their content.
A__ = os.path.join(lowercase_ , "utils" )
A__ = {
backend: os.path.join(lowercase_ , f"""dummy_{short_names.get(lowercase_ , lowercase_ )}_objects.py""" )
for backend in dummy_files.keys()
}
A__ = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(lowercase_ ):
with open(lowercase_ , "r" , encoding="utf-8" , newline="\n" ) as f:
A__ = f.read()
else:
A__ = ""
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
f"""Updating diffusers.utils.dummy_{short_names.get(lowercase_ , lowercase_ )}_objects.py as the main """
"__init__ has new objects." )
with open(dummy_file_paths[backend] , "w" , encoding="utf-8" , newline="\n" ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
"The main __init__ has objects that are not present in "
f"""diffusers.utils.dummy_{short_names.get(lowercase_ , lowercase_ )}_objects.py. Run `make fix-copies` """
"to fix this." )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
SCREAMING_SNAKE_CASE = parser.parse_args()
check_dummies(args.fix_and_overwrite)
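# Hedged CLI sketch, mirroring the header comment (run from the repo root):
#
#     python utils/check_dummies.py                      # report stale dummy files
#     python utils/check_dummies.py --fix_and_overwrite  # rewrite them in place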
| 247 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase__ : Dict = {
'configuration_layoutlmv3': [
'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LayoutLMv3Config',
'LayoutLMv3OnnxConfig',
],
'processing_layoutlmv3': ['LayoutLMv3Processor'],
'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Tuple = ['LayoutLMv3TokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : int = [
'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv3ForQuestionAnswering',
'LayoutLMv3ForSequenceClassification',
'LayoutLMv3ForTokenClassification',
'LayoutLMv3Model',
'LayoutLMv3PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Any = [
'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLayoutLMv3ForQuestionAnswering',
'TFLayoutLMv3ForSequenceClassification',
'TFLayoutLMv3ForTokenClassification',
'TFLayoutLMv3Model',
'TFLayoutLMv3PreTrainedModel',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Dict = ['LayoutLMv3FeatureExtractor']
UpperCAmelCase__ : int = ['LayoutLMv3ImageProcessor']
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
import sys
UpperCAmelCase__ : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 301 |
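# The __init__ above defers heavy imports until attribute access. A stdlib-only
# sketch of the same idea via PEP 562 module-level __getattr__ (the transformers
# _LazyModule helper adds caching on top; the names below are hypothetical):
import importlib

_import_structure = {"configuration_example": ["ExampleConfig"]}

def __getattr__(name):
    for module_name, attributes in _import_structure.items():
        if name in attributes:
            submodule = importlib.import_module(f".{module_name}", __name__)
            return getattr(submodule, name)  # imported only on first access
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")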
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCAmelCase__ : Union[str, Any] = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Optional[Any] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
UpperCAmelCase__ : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 301 | 1 |
"""simple docstring"""
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def _A ( UpperCamelCase_ : int) -> Optional[int]:
'''simple docstring'''
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
__lowercase = mean(
int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
for _ in range(UpperCamelCase_))
# The ratio of the area for circle to square is pi/4.
__lowercase = proportion * 4
print(F"""The estimated value of pi is {pi_estimate}""")
print(F"""The numpy value of pi is {pi}""")
print(F"""The total error is {abs(pi - pi_estimate)}""")
def _A ( UpperCamelCase_ : int, UpperCamelCase_ : Callable[[float], float], UpperCamelCase_ : float = 0.0, UpperCamelCase_ : float = 1.0, ) -> float:
'''simple docstring'''
return mean(
function_to_integrate(uniform(UpperCamelCase_, UpperCamelCase_)) for _ in range(UpperCamelCase_)) * (max_value - min_value)
def _A ( UpperCamelCase_ : int, UpperCamelCase_ : float = 0.0, UpperCamelCase_ : float = 1.0) -> None:
'''simple docstring'''
    def identity_function(x: float) -> float:
        return x
__lowercase = area_under_curve_estimator(
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_)
__lowercase = (max_value * max_value - min_value * min_value) / 2
print("******************")
print(F"""Estimating area under y=x where x varies from {min_value} to {max_value}""")
print(F"""Estimated value is {estimated_value}""")
print(F"""Expected value is {expected_value}""")
print(F"""Total error is {abs(estimated_value - expected_value)}""")
print("******************")
def _A ( UpperCamelCase_ : int) -> None:
'''simple docstring'''
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)
__lowercase = area_under_curve_estimator(
UpperCamelCase_, UpperCamelCase_, 0.0, 2.0)
print("******************")
print("Estimating pi using area_under_curve_estimator")
print(F"""Estimated value is {estimated_value}""")
print(F"""Expected value is {pi}""")
print(F"""Total error is {abs(estimated_value - pi)}""")
print("******************")
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 |
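# A runnable distillation of the estimator above: the fraction of uniform points
# in the unit square that land inside the quarter circle approximates pi/4.
from random import uniform

def estimate_pi(iterations: int = 100_000) -> float:
    inside = sum(
        1 for _ in range(iterations) if uniform(0.0, 1.0) ** 2 + uniform(0.0, 1.0) ** 2 <= 1.0
    )
    return 4.0 * inside / iterations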
"""simple docstring"""
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def _A ( UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : List[str]) -> Optional[int]:
'''simple docstring'''
if isinstance(UpperCamelCase_, torch.Tensor):
return image
elif isinstance(UpperCamelCase_, PIL.Image.Image):
__lowercase = [image]
if isinstance(image[0], PIL.Image.Image):
__lowercase = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
__lowercase = np.concatenate(UpperCamelCase_, axis=0)
__lowercase = np.array(UpperCamelCase_).astype(np.floataa) / 255.0
__lowercase = image.transpose(0, 3, 1, 2)
__lowercase = 2.0 * image - 1.0
__lowercase = torch.from_numpy(UpperCamelCase_)
elif isinstance(image[0], torch.Tensor):
__lowercase = torch.cat(UpperCamelCase_, dim=0)
return image
def _A ( UpperCamelCase_ : Dict, UpperCamelCase_ : str, UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : List[Any]=0.9_995) -> int:
'''simple docstring'''
if not isinstance(UpperCamelCase_, np.ndarray):
__lowercase = True
__lowercase = va.device
__lowercase = va.cpu().numpy()
__lowercase = va.cpu().numpy()
__lowercase = np.sum(va * va / (np.linalg.norm(UpperCamelCase_) * np.linalg.norm(UpperCamelCase_)))
if np.abs(UpperCamelCase_) > DOT_THRESHOLD:
__lowercase = (1 - t) * va + t * va
else:
__lowercase = np.arccos(UpperCamelCase_)
__lowercase = np.sin(UpperCamelCase_)
__lowercase = theta_a * t
__lowercase = np.sin(UpperCamelCase_)
__lowercase = np.sin(theta_a - theta_t) / sin_theta_a
__lowercase = sin_theta_t / sin_theta_a
__lowercase = sa * va + sa * va
if inputs_are_torch:
__lowercase = torch.from_numpy(UpperCamelCase_).to(UpperCamelCase_)
return va
def _A ( UpperCamelCase_ : List[str], UpperCamelCase_ : Union[str, Any]) -> int:
'''simple docstring'''
__lowercase = F.normalize(UpperCamelCase_, dim=-1)
__lowercase = F.normalize(UpperCamelCase_, dim=-1)
return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
def _A ( UpperCamelCase_ : Optional[int], UpperCamelCase_ : str) -> Optional[int]:
'''simple docstring'''
for param in model.parameters():
__lowercase = value
class _lowerCAmelCase ( lowercase ):
"""simple docstring"""
def __init__( self : Dict, UpperCAmelCase__ : AutoencoderKL, UpperCAmelCase__ : CLIPTextModel, UpperCAmelCase__ : CLIPModel, UpperCAmelCase__ : CLIPTokenizer, UpperCAmelCase__ : UNetaDConditionModel, UpperCAmelCase__ : Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler], UpperCAmelCase__ : CLIPFeatureExtractor, UpperCAmelCase__ : Union[str, Any]=None, UpperCAmelCase__ : List[str]=None, UpperCAmelCase__ : Any=None, ):
super().__init__()
self.register_modules(
vae=UpperCAmelCase__, text_encoder=UpperCAmelCase__, clip_model=UpperCAmelCase__, tokenizer=UpperCAmelCase__, unet=UpperCAmelCase__, scheduler=UpperCAmelCase__, feature_extractor=UpperCAmelCase__, coca_model=UpperCAmelCase__, coca_tokenizer=UpperCAmelCase__, coca_transform=UpperCAmelCase__, )
__lowercase = (
feature_extractor.size
if isinstance(feature_extractor.size, UpperCAmelCase__ )
else feature_extractor.size["shortest_edge"]
)
__lowercase = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std )
set_requires_grad(self.text_encoder, UpperCAmelCase__ )
set_requires_grad(self.clip_model, UpperCAmelCase__ )
def _lowercase ( self : Tuple, UpperCAmelCase__ : Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__lowercase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCAmelCase__ )
def _lowercase ( self : int ):
self.enable_attention_slicing(UpperCAmelCase__ )
def _lowercase ( self : str ):
set_requires_grad(self.vae, UpperCAmelCase__ )
def _lowercase ( self : Any ):
set_requires_grad(self.vae, UpperCAmelCase__ )
def _lowercase ( self : Union[str, Any] ):
set_requires_grad(self.unet, UpperCAmelCase__ )
def _lowercase ( self : Any ):
set_requires_grad(self.unet, UpperCAmelCase__ )
def _lowercase ( self : List[str], UpperCAmelCase__ : Dict, UpperCAmelCase__ : Any, UpperCAmelCase__ : Optional[Any] ):
# get the original timestep using init_timestep
__lowercase = min(int(num_inference_steps * strength ), UpperCAmelCase__ )
__lowercase = max(num_inference_steps - init_timestep, 0 )
__lowercase = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def _lowercase ( self : List[str], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : Dict, UpperCAmelCase__ : Any, UpperCAmelCase__ : int=None ):
if not isinstance(UpperCAmelCase__, torch.Tensor ):
raise ValueError(F"""`image` has to be of type `torch.Tensor` but is {type(UpperCAmelCase__ )}""" )
__lowercase = image.to(device=UpperCAmelCase__, dtype=UpperCAmelCase__ )
if isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
__lowercase = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(UpperCAmelCase__ )
]
__lowercase = torch.cat(UpperCAmelCase__, dim=0 )
else:
__lowercase = self.vae.encode(UpperCAmelCase__ ).latent_dist.sample(UpperCAmelCase__ )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
__lowercase = 0.18_215 * init_latents
__lowercase = init_latents.repeat_interleave(UpperCAmelCase__, dim=0 )
__lowercase = randn_tensor(init_latents.shape, generator=UpperCAmelCase__, device=UpperCAmelCase__, dtype=UpperCAmelCase__ )
# get latents
__lowercase = self.scheduler.add_noise(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = init_latents
return latents
def _lowercase ( self : Optional[int], UpperCAmelCase__ : Dict ):
__lowercase = self.coca_transform(UpperCAmelCase__ ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
__lowercase = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype ) )
__lowercase = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split("<end_of_text>" )[0].replace("<start_of_text>", "" ).rstrip(" .," )
def _lowercase ( self : Tuple, UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Tuple ):
__lowercase = self.feature_extractor.preprocess(UpperCAmelCase__ )
__lowercase = torch.from_numpy(clip_image_input["pixel_values"][0] ).unsqueeze(0 ).to(self.device ).half()
__lowercase = self.clip_model.get_image_features(UpperCAmelCase__ )
__lowercase = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=UpperCAmelCase__ )
__lowercase = image_embeddings_clip.repeat_interleave(UpperCAmelCase__, dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def _lowercase ( self : str, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : Dict, UpperCAmelCase__ : List[str], UpperCAmelCase__ : Dict, UpperCAmelCase__ : List[str], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Optional[int], ):
__lowercase = latents.detach().requires_grad_()
__lowercase = self.scheduler.scale_model_input(UpperCAmelCase__, UpperCAmelCase__ )
# predict the noise residual
__lowercase = self.unet(UpperCAmelCase__, UpperCAmelCase__, encoder_hidden_states=UpperCAmelCase__ ).sample
if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
__lowercase = self.scheduler.alphas_cumprod[timestep]
__lowercase = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__lowercase = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
__lowercase = torch.sqrt(UpperCAmelCase__ )
__lowercase = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler, UpperCAmelCase__ ):
__lowercase = self.scheduler.sigmas[index]
__lowercase = latents - sigma * noise_pred
else:
raise ValueError(F"""scheduler type {type(self.scheduler )} not supported""" )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
__lowercase = 1 / 0.18_215 * sample
__lowercase = self.vae.decode(UpperCAmelCase__ ).sample
__lowercase = (image / 2 + 0.5).clamp(0, 1 )
__lowercase = transforms.Resize(self.feature_extractor_size )(UpperCAmelCase__ )
__lowercase = self.normalize(UpperCAmelCase__ ).to(latents.dtype )
__lowercase = self.clip_model.get_image_features(UpperCAmelCase__ )
__lowercase = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=UpperCAmelCase__ )
__lowercase = spherical_dist_loss(UpperCAmelCase__, UpperCAmelCase__ ).mean() * clip_guidance_scale
__lowercase = -torch.autograd.grad(UpperCAmelCase__, UpperCAmelCase__ )[0]
if isinstance(self.scheduler, UpperCAmelCase__ ):
__lowercase = latents.detach() + grads * (sigma**2)
__lowercase = noise_pred_original
else:
__lowercase = noise_pred_original - torch.sqrt(UpperCAmelCase__ ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self : str, UpperCAmelCase__ : Union[torch.FloatTensor, PIL.Image.Image], UpperCAmelCase__ : Union[torch.FloatTensor, PIL.Image.Image], UpperCAmelCase__ : Optional[str] = None, UpperCAmelCase__ : Optional[str] = None, UpperCAmelCase__ : Optional[int] = 5_1_2, UpperCAmelCase__ : Optional[int] = 5_1_2, UpperCAmelCase__ : float = 0.6, UpperCAmelCase__ : Optional[int] = 5_0, UpperCAmelCase__ : Optional[float] = 7.5, UpperCAmelCase__ : Optional[int] = 1, UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[float] = 1_0_0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : float = 0.8, UpperCAmelCase__ : float = 0.1, UpperCAmelCase__ : float = 0.1, ):
if isinstance(UpperCAmelCase__, UpperCAmelCase__ ) and len(UpperCAmelCase__ ) != batch_size:
raise ValueError(F"""You have passed {batch_size} batch_size, but only {len(UpperCAmelCase__ )} generators.""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if isinstance(UpperCAmelCase__, torch.Generator ) and batch_size > 1:
__lowercase = [generator] + [None] * (batch_size - 1)
__lowercase = [
("model", self.coca_model is None),
("tokenizer", self.coca_tokenizer is None),
("transform", self.coca_transform is None),
]
__lowercase = [x[0] for x in coca_is_none if x[1]]
__lowercase = ", ".join(UpperCAmelCase__ )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(UpperCAmelCase__ ):
raise ValueError(
F"""Content prompt is None and CoCa [{coca_is_none_str}] is None."""
F"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
__lowercase = self.get_image_description(UpperCAmelCase__ )
if style_prompt is None:
if len(UpperCAmelCase__ ):
raise ValueError(
F"""Style prompt is None and CoCa [{coca_is_none_str}] is None."""
F""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
__lowercase = self.get_image_description(UpperCAmelCase__ )
# get prompt text embeddings for content and style
__lowercase = self.tokenizer(
UpperCAmelCase__, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=UpperCAmelCase__, return_tensors="pt", )
__lowercase = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
__lowercase = self.tokenizer(
UpperCAmelCase__, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=UpperCAmelCase__, return_tensors="pt", )
__lowercase = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
__lowercase = slerp(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
# duplicate text embeddings for each generation per prompt
__lowercase = text_embeddings.repeat_interleave(UpperCAmelCase__, dim=0 )
# set timesteps
__lowercase = "offset" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
__lowercase = {}
if accepts_offset:
__lowercase = 1
self.scheduler.set_timesteps(UpperCAmelCase__, **UpperCAmelCase__ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
__lowercase ,__lowercase = self.get_timesteps(UpperCAmelCase__, UpperCAmelCase__, self.device )
__lowercase = timesteps[:1].repeat(UpperCAmelCase__ )
# Preprocess image
__lowercase = preprocess(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = self.prepare_latents(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, text_embeddings.dtype, self.device, UpperCAmelCase__ )
__lowercase = preprocess(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = self.prepare_latents(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, text_embeddings.dtype, self.device, UpperCAmelCase__ )
__lowercase = slerp(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
if clip_guidance_scale > 0:
__lowercase = self.get_clip_image_embeddings(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = self.get_clip_image_embeddings(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = slerp(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__lowercase = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__lowercase = content_text_input.input_ids.shape[-1]
__lowercase = self.tokenizer([""], padding="max_length", max_length=UpperCAmelCase__, return_tensors="pt" )
__lowercase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
__lowercase = uncond_embeddings.repeat_interleave(UpperCAmelCase__, dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__lowercase = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__lowercase = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
__lowercase = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
__lowercase = torch.randn(UpperCAmelCase__, generator=UpperCAmelCase__, device="cpu", dtype=UpperCAmelCase__ ).to(
self.device )
else:
__lowercase = torch.randn(UpperCAmelCase__, generator=UpperCAmelCase__, device=self.device, dtype=UpperCAmelCase__ )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
__lowercase = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__lowercase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__lowercase = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__lowercase = {}
if accepts_eta:
__lowercase = eta
# check if the scheduler accepts generator
__lowercase = "generator" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
__lowercase = generator
with self.progress_bar(total=UpperCAmelCase__ ):
for i, t in enumerate(UpperCAmelCase__ ):
# expand the latents if we are doing classifier free guidance
__lowercase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__lowercase = self.scheduler.scale_model_input(UpperCAmelCase__, UpperCAmelCase__ )
# predict the noise residual
__lowercase = self.unet(UpperCAmelCase__, UpperCAmelCase__, encoder_hidden_states=UpperCAmelCase__ ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
__lowercase ,__lowercase = noise_pred.chunk(2 )
__lowercase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
__lowercase = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
__lowercase ,__lowercase = self.cond_fn(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, )
# compute the previous noisy sample x_t -> x_t-1
__lowercase = self.scheduler.step(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, **UpperCAmelCase__ ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
__lowercase = 1 / 0.18_215 * latents
__lowercase = self.vae.decode(UpperCAmelCase__ ).sample
__lowercase = (image / 2 + 0.5).clamp(0, 1 )
__lowercase = image.cpu().permute(0, 2, 3, 1 ).numpy()
if output_type == "pil":
__lowercase = self.numpy_to_pil(UpperCAmelCase__ )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=UpperCAmelCase__, nsfw_content_detected=UpperCAmelCase__ )
| 17 | 1 |
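# A NumPy-only sketch of the slerp used above to blend latents and text/image
# embeddings. Near-parallel vectors fall back to plain lerp, mirroring the
# DOT_THRESHOLD guard in the pipeline.
import numpy as np

def slerp(t: float, v0: np.ndarray, v1: np.ndarray, dot_threshold: float = 0.9995) -> np.ndarray:
    dot = np.sum(v0 * v1) / (np.linalg.norm(v0) * np.linalg.norm(v1))
    if np.abs(dot) > dot_threshold:
        return (1 - t) * v0 + t * v1  # nearly colinear: lerp is numerically safer
    theta = np.arccos(dot)
    return (np.sin((1 - t) * theta) * v0 + np.sin(t * theta) * v1) / np.sin(theta)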
import math
import qiskit
def A ( _lowerCamelCase = 1 , _lowerCamelCase = 1 , _lowerCamelCase = 1 ):
'''simple docstring'''
if (
isinstance(_lowerCamelCase , _lowerCamelCase )
or isinstance(_lowerCamelCase , _lowerCamelCase )
or isinstance(_lowerCamelCase , _lowerCamelCase )
):
raise TypeError("inputs must be integers." )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
raise ValueError("inputs must be positive." )
if (
(math.floor(_lowerCamelCase ) != input_a)
or (math.floor(_lowerCamelCase ) != input_a)
or (math.floor(_lowerCamelCase ) != carry_in)
):
raise ValueError("inputs must be exact integers." )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
raise ValueError("inputs must be less or equal to 2." )
# build registers
_lowerCAmelCase : List[Any] = qiskit.QuantumRegister(4 , "qr" )
_lowerCAmelCase : Dict = qiskit.ClassicalRegister(2 , "cr" )
# list the entries
_lowerCAmelCase : Any = [input_a, input_a, carry_in]
_lowerCAmelCase : List[str] = qiskit.QuantumCircuit(_lowerCamelCase , _lowerCamelCase )
for i in range(0 , 3 ):
if entry[i] == 2:
quantum_circuit.h(_lowerCamelCase ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(_lowerCamelCase ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(_lowerCamelCase ) # for 0 entries
# build the circuit
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
quantum_circuit.measure([2, 3] , _lowerCamelCase ) # measure the last two qbits
_lowerCAmelCase : Tuple = qiskit.Aer.get_backend("aer_simulator" )
_lowerCAmelCase : Dict = qiskit.execute(_lowerCamelCase , _lowerCamelCase , shots=1_000 )
return job.result().get_counts(_lowerCamelCase )
if __name__ == "__main__":
print(f'''Total sum count for state is: {quantum_full_adder(1, 1, 1)}''')
| 300 |
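# For cross-checking: the classical full adder whose truth table the circuit
# above reproduces on its two measured qubits (sum and carry-out).
def classical_full_adder(a: int, b: int, carry_in: int) -> tuple[int, int]:
    total = a + b + carry_in
    return total % 2, total // 2  # (sum bit, carry-out bit)

assert classical_full_adder(1, 1, 1) == (1, 1)  # 1 + 1 + 1 = 0b11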
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_snake_case = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class UpperCAmelCase_ ( unittest.TestCase):
lowerCamelCase__ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
lowerCamelCase__ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
lowerCamelCase__ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
lowerCamelCase__ = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : str = ZeroShotClassificationPipeline(
model=__a, tokenizer=__a, candidate_labels=["politics", "health"])
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[Any] = classifier("Who are you voting for in 2020?", candidate_labels="politics")
self.assertEqual(__a, {"sequence": ANY(__a), "labels": [ANY(__a)], "scores": [ANY(__a)]})
# No kwarg
_lowerCAmelCase : int = classifier("Who are you voting for in 2020?", ["politics"])
self.assertEqual(__a, {"sequence": ANY(__a), "labels": [ANY(__a)], "scores": [ANY(__a)]})
_lowerCAmelCase : Tuple = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
self.assertEqual(__a, {"sequence": ANY(__a), "labels": [ANY(__a)], "scores": [ANY(__a)]})
_lowerCAmelCase : List[Any] = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
self.assertEqual(
__a, {"sequence": ANY(__a), "labels": [ANY(__a), ANY(__a)], "scores": [ANY(__a), ANY(__a)]})
self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)
_lowerCAmelCase : List[str] = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
self.assertEqual(
__a, {"sequence": ANY(__a), "labels": [ANY(__a), ANY(__a)], "scores": [ANY(__a), ANY(__a)]})
self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)
_lowerCAmelCase : List[Any] = classifier(
"Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}")
self.assertEqual(__a, {"sequence": ANY(__a), "labels": [ANY(__a)], "scores": [ANY(__a)]})
# https://github.com/huggingface/transformers/issues/13846
_lowerCAmelCase : Optional[int] = classifier(["I am happy"], ["positive", "negative"])
self.assertEqual(
__a, [
{"sequence": ANY(__a), "labels": [ANY(__a), ANY(__a)], "scores": [ANY(__a), ANY(__a)]}
for i in range(1)
], )
_lowerCAmelCase : Any = classifier(["I am happy", "I am sad"], ["positive", "negative"])
self.assertEqual(
__a, [
{"sequence": ANY(__a), "labels": [ANY(__a), ANY(__a)], "scores": [ANY(__a), ANY(__a)]}
for i in range(2)
], )
with self.assertRaises(__a):
classifier("", candidate_labels="politics")
with self.assertRaises(__a):
classifier(__a, candidate_labels="politics")
with self.assertRaises(__a):
classifier("Who are you voting for in 2020?", candidate_labels="")
with self.assertRaises(__a):
classifier("Who are you voting for in 2020?", candidate_labels=__a)
with self.assertRaises(__a):
classifier(
"Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="Not formatting template", )
with self.assertRaises(__a):
classifier(
"Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template=__a, )
self.run_entailment_id(__a)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Tuple = zero_shot_classifier.model.config
_lowerCAmelCase : Optional[Any] = config.labelaid
_lowerCAmelCase : Union[str, Any] = zero_shot_classifier.entailment_id
_lowerCAmelCase : Any = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
self.assertEqual(zero_shot_classifier.entailment_id, -1)
_lowerCAmelCase : Optional[int] = {"entailment": 0, "neutral": 1, "contradiction": 2}
self.assertEqual(zero_shot_classifier.entailment_id, 0)
_lowerCAmelCase : Optional[int] = {"ENTAIL": 0, "NON-ENTAIL": 1}
self.assertEqual(zero_shot_classifier.entailment_id, 0)
_lowerCAmelCase : Optional[Any] = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
self.assertEqual(zero_shot_classifier.entailment_id, 2)
_lowerCAmelCase : List[str] = original_labelaid
self.assertEqual(__a, zero_shot_classifier.entailment_id)
@require_torch
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = pipeline(
"zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt", )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"Who are you voting for in 2020?" * 100, candidate_labels=["politics", "public health", "science"])
@require_torch
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = pipeline(
"zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt", )
_lowerCAmelCase : List[Any] = zero_shot_classifier(
"Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
self.assertEqual(
nested_simplify(__a), {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.333, 0.333, 0.333],
}, )
@require_tf
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = pipeline(
"zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="tf", )
_lowerCAmelCase : Union[str, Any] = zero_shot_classifier(
"Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
self.assertEqual(
nested_simplify(__a), {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.333, 0.333, 0.333],
}, )
@slow
@require_torch
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt")
_lowerCAmelCase : Optional[Any] = zero_shot_classifier(
"Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
self.assertEqual(
nested_simplify(__a), {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.976, 0.015, 0.009],
}, )
_lowerCAmelCase : Union[str, Any] = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data.", candidate_labels=["machine learning", "statistics", "translation", "vision"], multi_label=__a, )
self.assertEqual(
nested_simplify(__a), {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.817, 0.713, 0.018, 0.018],
}, )
@slow
@require_tf
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
_lowerCAmelCase : Dict = zero_shot_classifier(
"Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
self.assertEqual(
nested_simplify(__a), {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.976, 0.015, 0.009],
}, )
_lowerCAmelCase : str = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data.", candidate_labels=["machine learning", "statistics", "translation", "vision"], multi_label=__a, )
self.assertEqual(
nested_simplify(__a), {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.817, 0.713, 0.018, 0.018],
}, )
| 300 | 1 |
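# Plain usage of the pipeline exercised by the tests above; the model name is
# one of the checkpoints the slow tests load.
from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="roberta-large-mnli")
result = classifier(
    "Who are you voting for in 2020?",
    candidate_labels=["politics", "public health", "science"],
)
print(result["labels"][0], result["scores"][0])  # highest-scoring label first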
'''simple docstring'''
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--user''', type=str, default='''ubuntu''')
parser.add_argument('''--host''', type=str, default='''localhost''')
parser.add_argument('''--key_path''', type=str, default=None)
parser.add_argument('''--instance''', type=str, default='''V100:1''')
parser.add_argument('''--provider''', type=str, default='''cheapest''')
parser.add_argument('''--use_spot''', type=bool, default=False)
parser.add_argument('''--example''', type=str, default='''pytorch/text-generation/run_generation.py''')
UpperCAmelCase , UpperCAmelCase = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError('''Cannot specify both BYO and on-demand cluster args''')
UpperCAmelCase = rh.cluster(
name='''rh-cluster''', ips=[args.host], ssh_creds={'''ssh_user''': args.user, '''ssh_private_key''': args.key_path}
)
else:
UpperCAmelCase = rh.cluster(
name='''rh-cluster''', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
UpperCAmelCase = args.example.rsplit('''/''', 1)[0]
# Set up remote environment
cluster.install_packages(['''pip:./''']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([F'''pip install -r transformers/examples/{example_dir}/requirements.txt'''])
cluster.run(['''pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'''])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([F'''python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}'''])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 141 |
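# Why the run() command above wraps each forwarded argument in shlex.quote:
# untrusted CLI args are made shell-safe before string interpolation.
import shlex

unknown = ["--prompt", "hello world; rm -rf /"]
print(" ".join(shlex.quote(arg) for arg in unknown))
# --prompt 'hello world; rm -rf /'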
'''simple docstring'''
def __UpperCamelCase ( lowercase__ : Union[str, Any]=2_81_23 ):
'''simple docstring'''
__lowercase =[1] * (limit + 1)
for i in range(2, int(limit**0.5 ) + 1 ):
sum_divs[i * i] += i
for k in range(i + 1, limit // i + 1 ):
sum_divs[k * i] += k + i
__lowercase =set()
__lowercase =0
for n in range(1, limit + 1 ):
if sum_divs[n] > n:
abundants.add(lowercase__ )
if not any((n - a in abundants) for a in abundants ):
res += n
return res
if __name__ == "__main__":
print(solution())
| 141 | 1 |
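# A brute-force cross-check of the divisor sieve above: 12 is the smallest
# abundant number (1 + 2 + 3 + 4 + 6 = 16 > 12).
def is_abundant(n: int) -> bool:
    return sum(d for d in range(1, n) if n % d == 0) > n

assert is_abundant(12) and not is_abundant(11)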
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class lowerCamelCase__ ( metaclass=__A):
'''simple docstring'''
snake_case_ =['keras_nlp']
def __init__(self ,*__lowerCamelCase ,**__lowerCamelCase ) -> Any:
"""simple docstring"""
requires_backends(self ,['''keras_nlp'''] )
| 359 |
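# The dummy-object pattern above, de-obfuscated as a sketch (the class name is
# hypothetical): a cheap placeholder that only errors when actually used, so
# importing the package succeeds without the optional backend installed.
class DummyKerasNLPObject:
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        raise ImportError(
            "This object requires the `keras_nlp` backend: pip install keras-nlp"
        )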
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__snake_case : str ={
'configuration_conditional_detr': [
'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ConditionalDetrConfig',
'ConditionalDetrOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Tuple =['ConditionalDetrFeatureExtractor']
__snake_case : Union[str, Any] =['ConditionalDetrImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : List[str] =[
'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConditionalDetrForObjectDetection',
'ConditionalDetrForSegmentation',
'ConditionalDetrModel',
'ConditionalDetrPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
__snake_case : str =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 94 | 0 |
'''simple docstring'''
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def __magic_name__( lowerCamelCase):
if not is_accelerate_available():
return method
__lowerCAmelCase = version.parse(accelerate.__version__).base_version
if version.parse(lowerCamelCase) < version.parse('''0.17.0'''):
return method
def wrapper(self, *lowerCamelCase, **lowerCamelCase):
if hasattr(self, '''_hf_hook''') and hasattr(self._hf_hook, '''pre_forward'''):
self._hf_hook.pre_forward(self)
return method(self, *lowerCamelCase, **lowerCamelCase)
return wrapper
| 174 |
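# A generic sketch of the version gate above (names here are hypothetical):
# wrap a method only when the installed dependency is new enough to need the
# pre-forward hook, otherwise return it untouched.
from functools import wraps
from packaging import version

def apply_hook_if_at_least(installed: str, minimum: str = "0.17.0"):
    def decorator(method):
        if version.parse(installed) < version.parse(minimum):
            return method  # old dependency: leave the method untouched
        @wraps(method)
        def wrapper(self, *args, **kwargs):
            if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
                self._hf_hook.pre_forward(self)
            return method(self, *args, **kwargs)
        return wrapper
    return decorator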
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_UpperCAmelCase : Union[str, Any] = """\
@inproceedings{snover-etal-2006-study,
title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",
author = \"Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John\",
booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",
month = aug # \" 8-12\",
year = \"2006\",
address = \"Cambridge, Massachusetts, USA\",
publisher = \"Association for Machine Translation in the Americas\",
url = \"https://aclanthology.org/2006.amta-papers.25\",
pages = \"223--231\",
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
_UpperCAmelCase : int = """\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
"""
_UpperCAmelCase : Any = """
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
'score' (float): TER score (num_edits / sum_ref_lengths * 100)
'num_edits' (int): The cumulative number of edits
'ref_length' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\",
... \"What did the TER metric user say to the developer?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],
... [\"Your jokes are...\", \"...TERrible\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}
Example 2:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}
Example 3:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}
Example 4:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}
Example 5:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\",
... \"What did the TER metric user say to the developer?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],
... [\"Your jokes are...\", \"...TERrible\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a__ ( datasets.Metric ):
"""simple docstring"""
def _snake_case (self ):
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''http://www.cs.umd.edu/~snover/tercom/''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#ter'''] , reference_urls=[
'''https://github.com/jhclark/tercom''',
] , )
def _snake_case (self , __lowercase , __lowercase , __lowercase = False , __lowercase = False , __lowercase = False , __lowercase = False , ):
__lowerCAmelCase = len(references[0] )
if any(len(__lowercase ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
__lowerCAmelCase = [[refs[i] for refs in references] for i in range(__lowercase )]
__lowerCAmelCase = TER(
normalized=__lowercase , no_punct=__lowercase , asian_support=__lowercase , case_sensitive=__lowercase , )
__lowerCAmelCase = sb_ter.corpus_score(__lowercase , __lowercase )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 174 | 1 |
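# Calling sacrebleu's TER directly, as the metric above does internally; note
# references are one stream per reference, transposed from per-prediction lists.
from sacrebleu.metrics import TER

hypotheses = ["does this sentence match??"]
references = [["does this sentence match"], ["does this sentence match!?!"]]
result = TER(case_sensitive=True).corpus_score(hypotheses, references)
print(result.score, result.num_edits, result.ref_length)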
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__A =logging.get_logger(__name__)
__A ={
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
}
__A ={
'''vocab_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'''},
'''merges_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'''},
}
__A ={
'''ctrl''': 2_56,
}
__A ={
'''Pregnancy''': 16_86_29,
'''Christianity''': 76_75,
'''Explain''': 10_64_23,
'''Fitness''': 6_34_40,
'''Saving''': 6_31_63,
'''Ask''': 2_71_71,
'''Ass''': 9_59_85,
'''Joke''': 16_35_09,
'''Questions''': 4_56_22,
'''Thoughts''': 4_96_05,
'''Retail''': 5_23_42,
'''Feminism''': 16_43_38,
'''Writing''': 1_19_92,
'''Atheism''': 19_22_63,
'''Netflix''': 4_86_16,
'''Computing''': 3_96_39,
'''Opinion''': 4_32_13,
'''Alone''': 4_49_67,
'''Funny''': 5_89_17,
'''Gaming''': 4_03_58,
'''Human''': 40_88,
'''India''': 13_31,
'''Joker''': 7_71_38,
'''Diet''': 3_62_06,
'''Legal''': 1_18_59,
'''Norman''': 49_39,
'''Tip''': 7_26_89,
'''Weight''': 5_23_43,
'''Movies''': 4_62_73,
'''Running''': 2_34_25,
'''Science''': 20_90,
'''Horror''': 3_77_93,
'''Confession''': 6_05_72,
'''Finance''': 1_22_50,
'''Politics''': 1_63_60,
'''Scary''': 19_19_85,
'''Support''': 1_26_54,
'''Technologies''': 3_25_16,
'''Teenage''': 6_61_60,
'''Event''': 3_27_69,
'''Learned''': 6_74_60,
'''Notion''': 18_27_70,
'''Wikipedia''': 3_75_83,
'''Books''': 66_65,
'''Extract''': 7_60_50,
'''Confessions''': 10_27_01,
'''Conspiracy''': 7_59_32,
'''Links''': 6_36_74,
'''Narcissus''': 15_04_25,
'''Relationship''': 5_47_66,
'''Relationships''': 13_47_96,
'''Reviews''': 4_16_71,
'''News''': 42_56,
'''Translation''': 2_68_20,
'''multilingual''': 12_84_06,
}
def _UpperCamelCase ( UpperCamelCase__ ):
UpperCAmelCase__ : Optional[Any] = set()
UpperCAmelCase__ : List[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCAmelCase__ : Optional[int] = char
UpperCAmelCase__ : int = set(__lowerCAmelCase )
return pairs
class _snake_case ( lowerCamelCase_ ):
lowerCAmelCase :Optional[Any] = VOCAB_FILES_NAMES
lowerCAmelCase :str = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase :str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase :int = CONTROL_CODES
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase="<unk>" , **_lowerCamelCase):
super().__init__(unk_token=__snake_case , **__snake_case)
with open(__snake_case , encoding="""utf-8""") as vocab_handle:
UpperCAmelCase__ : int = json.load(__snake_case)
UpperCAmelCase__ : List[Any] = {v: k for k, v in self.encoder.items()}
with open(__snake_case , encoding="""utf-8""") as merges_handle:
UpperCAmelCase__ : List[Any] = merges_handle.read().split("""\n""")[1:-1]
UpperCAmelCase__ : Optional[Any] = [tuple(merge.split()) for merge in merges]
UpperCAmelCase__ : str = dict(zip(__snake_case , range(len(__snake_case))))
UpperCAmelCase__ : Tuple = {}
@property
def snake_case__ ( self):
return len(self.encoder)
def snake_case__ ( self):
return dict(self.encoder , **self.added_tokens_encoder)
def snake_case__ ( self , _lowerCamelCase):
if token in self.cache:
return self.cache[token]
UpperCAmelCase__ : Union[str, Any] = tuple(__snake_case)
UpperCAmelCase__ : str = tuple(list(word[:-1]) + [word[-1] + """</w>"""])
UpperCAmelCase__ : str = get_pairs(__snake_case)
if not pairs:
return token
while True:
UpperCAmelCase__ : Union[str, Any] = min(__snake_case , key=lambda _lowerCamelCase: self.bpe_ranks.get(__snake_case , float("""inf""")))
if bigram not in self.bpe_ranks:
break
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = bigram
UpperCAmelCase__ : str = []
UpperCAmelCase__ : List[str] = 0
while i < len(__snake_case):
try:
UpperCAmelCase__ : List[Any] = word.index(__snake_case , __snake_case)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
UpperCAmelCase__ : int = j
if word[i] == first and i < len(__snake_case) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
UpperCAmelCase__ : str = tuple(__snake_case)
UpperCAmelCase__ : List[Any] = new_word
if len(__snake_case) == 1:
break
else:
UpperCAmelCase__ : Optional[int] = get_pairs(__snake_case)
UpperCAmelCase__ : Union[str, Any] = """@@ """.join(__snake_case)
UpperCAmelCase__ : Tuple = word[:-4]
UpperCAmelCase__ : Union[str, Any] = word
return word
def snake_case__ ( self , _lowerCamelCase):
UpperCAmelCase__ : Optional[int] = []
UpperCAmelCase__ : Optional[Any] = re.findall(r"""\S+\n?""" , __snake_case)
for token in words:
split_tokens.extend(list(self.bpe(__snake_case).split(""" """)))
return split_tokens
def snake_case__ ( self , _lowerCamelCase):
return self.encoder.get(__snake_case , self.encoder.get(self.unk_token))
def snake_case__ ( self , _lowerCamelCase):
return self.decoder.get(__snake_case , self.unk_token)
def snake_case__ ( self , _lowerCamelCase):
UpperCAmelCase__ : str = """ """.join(__snake_case).replace("""@@ """ , """""").strip()
return out_string
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase = None):
if not os.path.isdir(__snake_case):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
return
UpperCAmelCase__ : Optional[int] = os.path.join(
__snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
UpperCAmelCase__ : int = os.path.join(
__snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""])
with open(__snake_case , """w""" , encoding="""utf-8""") as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__snake_case , ensure_ascii=__snake_case) + """\n""")
UpperCAmelCase__ : Any = 0
with open(__snake_case , """w""" , encoding="""utf-8""") as writer:
writer.write("""#version: 0.2\n""")
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _lowerCamelCase: kv[1]):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
""" Please check that the tokenizer is not corrupted!""")
UpperCAmelCase__ : List[Any] = token_index
writer.write(""" """.join(__snake_case) + """\n""")
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far) | 371 |
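# Illustrative sketch (not part of the original file): the greedy BPE merge loop
# above, reduced to a self-contained function. `merge_ranks` is a hypothetical
# toy merge table; a real tokenizer loads it from a merges file.
def toy_bpe(token, merge_ranks):
    def get_pairs(word):
        return {(word[i], word[i + 1]) for i in range(len(word) - 1)}

    word = tuple(token)
    while len(word) > 1:
        pairs = get_pairs(word)
        # pick the pair with the lowest merge rank; stop when nothing is mergeable
        bigram = min(pairs, key=lambda pair: merge_ranks.get(pair, float("inf")))
        if bigram not in merge_ranks:
            break
        first, second = bigram
        new_word, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == (first, second):
                new_word.append(first + second)
                i += 2
            else:
                new_word.append(word[i])
                i += 1
        word = tuple(new_word)
    return word

# e.g. toy_bpe("lower", {("l", "o"): 0, ("lo", "w"): 1}) -> ("low", "e", "r")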
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_table_transformer': [
'TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TableTransformerConfig',
'TableTransformerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_table_transformer'] = [
'TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TableTransformerForObjectDetection',
'TableTransformerModel',
'TableTransformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
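# Illustrative sketch (not part of the file above): the core idea behind
# `_LazyModule` is PEP 562 module-level `__getattr__` — a name is imported from
# its submodule only on first access. The mapping below is a hypothetical example.
import importlib

_NAME_TO_SUBMODULE = {"TableTransformerConfig": ".configuration_table_transformer"}

def __getattr__(name):
    if name in _NAME_TO_SUBMODULE:
        # resolve and import the submodule lazily, then fetch the attribute
        submodule = importlib.import_module(_NAME_TO_SUBMODULE[name], __name__)
        return getattr(submodule, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")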
"""simple docstring"""
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save the per-example sequence lengths of the train and val datasets to their len_file."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            # count non-pad tokens in each row
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
| 100 |
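# Illustrative sketch (not part of the script above): the row-wise length trick
# used in `get_lens` — count non-pad tokens with a boolean mask.
import torch

batch_input_ids = torch.tensor([[5, 8, 9, 0, 0], [5, 8, 0, 0, 0]])  # toy batch, pad id 0
pad_token_id = 0
lens = batch_input_ids.ne(pad_token_id).sum(1).tolist()
assert lens == [3, 2]  # number of real tokens before padding in each row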
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI.

    This only selects the runs triggered by the `schedule` event on the `main` branch.
    """
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Get the last completed workflow run id of the scheduled (daily) CI."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the artifacts of the last completed workflow run of the scheduled (daily) CI."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Get the directly readable report files from the artifacts of the last daily CI run."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
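# Illustrative sketch (not part of the utility above): reading every text file
# out of a downloaded artifact zip, as `get_last_daily_ci_reports` does. The
# path in the usage comment is a hypothetical example.
import zipfile

def read_artifact_zip(zip_path):
    contents = {}
    with zipfile.ZipFile(zip_path) as z:
        for filename in z.namelist():
            if not filename.endswith("/"):  # skip directory entries
                with z.open(filename) as f:
                    contents[filename] = f.read().decode("UTF-8")
    return contents

# e.g. read_artifact_zip("output/ci_results.zip")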
| 154 | 0 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """BuilderConfig for Parquet."""

    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in data_files."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
| 249 |
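# Illustrative sketch (not part of the builder above): streaming a Parquet file
# in record batches with pyarrow, the same pattern `_generate_tables` relies on.
# "data.parquet" is a hypothetical path.
import pyarrow as pa
import pyarrow.parquet as pq

def iter_parquet_tables(path, batch_size=10_000, columns=None):
    parquet_file = pq.ParquetFile(path)
    for record_batch in parquet_file.iter_batches(batch_size=batch_size, columns=columns):
        # each batch becomes a small Arrow table instead of loading the whole file
        yield pa.Table.from_batches([record_batch])

# e.g. for table in iter_parquet_tables("data.parquet", batch_size=1_000): ...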
'''simple docstring'''
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
("/", "."),
("layer_", "layers."),
("kernel", "weight"),
("beta", "bias"),
("gamma", "weight"),
("pegasus", "model"),
]
END_COMMON = [
(".output.dense", ".fc2"),
("intermediate.LayerNorm", "final_layer_norm"),
("intermediate.dense", "fc1"),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.out_proj"),
("attention.self", "self_attn"),
("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
("attention.encdec_output.dense", "encoder_attn.out_proj"),
("attention.encdec", "encoder_attn"),
("key", "k_proj"),
("value", "v_proj"),
("query", "q_proj"),
("decoder.LayerNorm", "decoder.layernorm_embedding"),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
("embeddings.word_embeddings", "shared.weight"),
("embeddings.position_embeddings", "embed_positions.weight"),
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.output"),
("attention.self", "self_attn.self"),
("encoder.LayerNorm", "encoder.layernorm_embedding"),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
"encdec/key/bias",
"encdec/query/bias",
"encdec/value/bias",
"self/key/bias",
"self/query/bias",
"self/value/bias",
"encdec_output/dense/bias",
"attention/output/dense/bias",
]
def rename_state_dict_key(k, patterns):
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k


def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T  # TF stores kernels as (in, out); torch Linear expects (out, in)
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path) -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict) -> None:
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    config_update = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 249 | 1 |
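# Illustrative sketch (not part of the conversion script): the rename-then-check
# pattern used above, on a toy pattern table. Dense/projection kernels from TF
# are transposed because TF stores them as (in, out) while torch.nn.Linear
# expects (out, in). TOY_PATTERNS is a hypothetical subset of the real table.
import numpy as np

TOY_PATTERNS = [("/", "."), ("kernel", "weight")]

def rename_key(key, patterns=TOY_PATTERNS):
    for old, new in patterns:
        key = key.replace(old, new)
    return key

tf_key = "encoder/layer_0/dense/kernel"
assert rename_key(tf_key) == "encoder.layer_0.dense.weight"
tf_value = np.zeros((128, 64))   # TF layout: (in_features, out_features)
torch_value = tf_value.T         # torch layout: (out_features, in_features)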
'''simple docstring'''
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
"examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r"\1version=\"VERSION\","),
"doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
REPLACE_FILES = {
"init": "src/diffusers/__init__.py",
"setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version in all examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    # If the introduction or the conclusion of the list change, the prompts may need to be updated.
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/diffusers/main/model_doc",
                "https://huggingface.co/docs/diffusers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Reads the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)


def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
| 23 |
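# Illustrative sketch (not part of the release script): how one REPLACE_PATTERNS
# entry rewrites a version string in place with a MULTILINE regex.
import re

init_pattern = re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE)
code = 'name = "pkg"\n__version__ = "0.19.0.dev0"\n'
new_code = init_pattern.sub('__version__ = "0.19.0"', code)
assert '__version__ = "0.19.0"' in new_code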
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."

# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()


def _should_continue(line, indent):
    # A definition continues while lines keep the indent, are blank, or close a signature.
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None


def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name`."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)


_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")


def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""


def blackify(code):
    """Applies the black part of our `make style` command to `code`."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result


def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the differences or overwrite the content depending on `overwrite`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs


def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
check_copies(args.fix_and_overwrite)
| 147 | 0 |
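# Illustrative sketch (not part of the checker, and simpler than the real
# `_should_continue` logic): extracting a definition's body by indentation —
# keep consuming lines while they are indented deeper than the header or blank.
def extract_block(lines, start_index, indent=""):
    end = start_index
    while end < len(lines) and (lines[end].startswith(indent + " ") or lines[end].strip() == ""):
        end += 1
    return "".join(lines[start_index:end])

src = ["def f():\n", "    return 1\n", "\n", "def g():\n"]
body = extract_block(src, 1)  # body lines after the `def f():` header
assert body == "    return 1\n\n"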
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))


class TFCvtModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size, num_labels=self.num_labels, num_channels=self.num_channels, embed_dim=self.embed_dim, num_heads=self.num_heads, patch_sizes=self.patch_sizes, patch_padding=self.patch_padding, patch_stride=self.patch_stride, stride_kv=self.stride_kv, depth=self.depth, cls_token=self.cls_token, attention_drop_rate=self.attention_drop_rate, initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFCvtModel(config=config)
        result = model(pixel_values, training=False)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        # each stage is a strided convolution, so the spatial size shrinks stage by stage
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFCvtForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFCvtModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFCvtModelTester(self)
        self.config_tester = TFCvtConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    @unittest.skip(reason="Cvt does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Cvt does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Cvt does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    def test_dataset_conversion(self):
        super().test_dataset_conversion()

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8")
    def test_keras_fit_mixed_precision(self):
        policy = tf.keras.mixed_precision.Policy("mixed_float16")
        tf.keras.mixed_precision.set_global_policy(policy)
        super().test_keras_fit()
        tf.keras.mixed_precision.set_global_policy("float32")

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFCvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([0.9285, 0.9015, -0.3150])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 146 |
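# Illustrative sketch (not from the test file): the convolutional output-size
# formula the CvT shape checks above rely on: floor((size + 2*pad - k)/stride) + 1.
from math import floor

def conv_out_size(size, kernel, stride, padding):
    return floor((size + 2 * padding - kernel) / stride) + 1

# CvT stage 1 with the tester's defaults: 64x64 input, 7x7 patches, stride 4, pad 2
assert conv_out_size(64, kernel=7, stride=4, padding=2) == 16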
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        """
        Initialize with a list of the number of items in each set
        and with rank = 1 for each set.
        """
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Merge two sets together using union by rank; return False if already merged."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the parent of a given set, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
| 146 | 1 |
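# Illustrative usage sketch for the disjoint set above: merge sets and track
# the size of the largest one.
ds = DisjointSet([1, 1, 1, 1])  # four singleton sets
ds.merge(0, 1)
ds.merge(2, 3)
ds.merge(0, 2)
assert ds.get_parent(0) == ds.get_parent(3)  # all four elements are now in one set
assert ds.max_set == 4                       # the largest set has 4 elements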
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )

        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)

        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            projection_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5002,
        )
        text_encoder = CLIPTextModel(text_encoder_config)

        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_alt_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder

        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = "A photo of an astronaut"
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_alt_diffusion(self):
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_fast_ddim(self):
        scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion", subfolder="scheduler")

        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", scheduler=scheduler, safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 234 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/spiece.model',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/spiece.model',
},
'tokenizer_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/fnet-base': 512,
'google/fnet-large': 512,
}
SPIECE_UNDERLINE = '▁'
class FNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word: include the space before it and do not normalize it.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model inputs by adding [CLS] and [SEP] around single sequences or pairs."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Create token type ids: 0 for the first sequence (and its specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 234 | 1 |
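# Illustrative sketch (not from the file above): what the two helpers produce
# for a pair of sequences, using hypothetical ids cls=2, sep=3.
cls_id, sep_id = [2], [3]
ids_a, ids_b = [10, 11], [20]
input_ids = cls_id + ids_a + sep_id + ids_b + sep_id  # [2, 10, 11, 3, 20, 3]
token_type_ids = len(cls_id + ids_a + sep_id) * [0] + len(ids_b + sep_id) * [1]
assert token_type_ids == [0, 0, 0, 0, 1, 1]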
from typing import Optional
from torch import nn
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    def __init__(
        self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None,
        num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None, attention_bias: bool = False,
        sample_size: Optional[int] = None, num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim,
                    in_channels=in_channels, num_layers=num_layers, dropout=dropout,
                    norm_num_groups=norm_num_groups, cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias, sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds, activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:
        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5
        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]
        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self, hidden_states, encoder_hidden_states, timestep=None, attention_mask=None,
        cross_attention_kwargs=None, return_dict: bool = True,
    ):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
 | 151 |
import random


def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """Generate a random graph on `vertices_number` vertices; each possible edge is
    added independently with the given probability."""
    graph: dict = {i: [] for i in range(vertices_number)}

    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph

    # for each couple of nodes, add an edge from u to v
    # if the randomly generated number is smaller than probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge from j to i as well
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    """Generate a complete graph: every vertex is connected to all the others."""
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }


if __name__ == "__main__":
    import doctest

    doctest.testmod()
 | 151 | 1 |
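# Illustrative usage sketch for the generators above.
import random

random.seed(1)
g = random_graph(4, 0.5)  # undirected Erdos-Renyi-style graph
assert set(g) == {0, 1, 2, 3}
k3 = complete_graph(3)
assert k3 == {0: [1, 2], 1: [0, 2], 2: [0, 1]}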
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
 | 104 |
ERROR_MESSAGE = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    """Validate a Spanish national id (DNI): 8 digits plus a checksum letter."""
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(ERROR_MESSAGE)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(ERROR_MESSAGE) from ex

    if letter.isdigit():
        raise ValueError(ERROR_MESSAGE)

    return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 348 | 0 |
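# Quick numerical check of the DNI checksum rule implemented above: the control
# letter is LOOKUP_LETTERS[number % 23]; "12345678Z" is the commonly cited valid example.
LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"
print(LETTERS[12345678 % 23])  # -> "Z", so "12345678Z" passes the check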
"""simple docstring"""
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class UpperCAmelCase (lowerCamelCase__ ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=1024 , _UpperCAmelCase=1024 , _UpperCAmelCase=3.6 ):
lowercase__: int = tokenizer
lowercase__: Optional[int] = tokenizer.bos_token_id
lowercase__: Dict = dataset
lowercase__: Union[str, Any] = seq_length
lowercase__: Tuple = seq_length * chars_per_token * num_of_sequences
def __iter__( self ):
lowercase__: Tuple = iter(self.dataset )
lowercase__: Union[str, Any] = True
while more_examples:
lowercase__: List[Any] = [], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(__snake_case )['''content'''] )
buffer_len += len(buffer[-1] )
except StopIteration:
lowercase__: Dict = False
break
lowercase__: Dict = tokenizer(__snake_case , truncation=__snake_case )['input_ids']
lowercase__: int = []
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id] )
for i in range(0 , len(__snake_case ) , self.seq_length ):
lowercase__: str = all_token_ids[i : i + self.seq_length]
if len(__snake_case ) == self.seq_length:
yield torch.tensor(__snake_case )
def create_dataloader( args ):
    ds_kwargs = {'streaming': True}
    valid_data = load_dataset(args.dataset_name , split='''train''' , **ds_kwargs )
    valid_dataset = ConstantLengthDataset(tokenizer , valid_data , seq_length=args.seq_length )
    eval_dataloader = DataLoader(valid_dataset , batch_size=args.batch_size )
    return eval_dataloader
def evaluate( args ):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader ):
        with torch.no_grad():
            outputs = model(batch , labels=batch )
        loss = outputs.loss.repeat(args.batch_size )
        losses.append(accelerator.gather(loss ) )
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses ) )
    try:
        perplexity = torch.exp(loss )
    except OverflowError:
        # keep a tensor so that .item() below still works
        perplexity = torch.tensor(float('''inf''' ) )
    return loss.item(), perplexity.item()
# Setup Accelerator
__A = Accelerator()
# Parse configuration
__A = HfArgumentParser(EvaluationArguments)
__A = parser.parse_args()
set_seed(args.seed)
# Logging
__A = logging.getLogger(__name__)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
# Load model and tokenizer
__A = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
__A = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
__A = create_dataloader(args)
# Prepare everything with our `accelerator`.
__A = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
__A = evaluate(args)
logger.info(f'''loss/eval: {eval_loss}, perplexity: {perplexity}''')
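# Pure-Python sketch of the sequence-packing idea behind ConstantLengthDataset
# above: concatenate token ids with a separator and emit fixed-length chunks.
def pack_sequences(tokenized_texts, seq_length, concat_token_id):
    all_ids = []
    for ids in tokenized_texts:
        all_ids.extend(ids + [concat_token_id])
    for i in range(0, len(all_ids), seq_length):
        chunk = all_ids[i : i + seq_length]
        if len(chunk) == seq_length:  # drop the ragged tail, as the class does
            yield chunk

print(list(pack_sequences([[1, 2, 3], [4, 5], [6, 7, 8, 9]], 4, 0)))
# -> [[1, 2, 3, 0], [4, 5, 0, 6], [7, 8, 9, 0]]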
| 357 | """simple docstring"""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
__A = "<<<<<<< This should probably be modified because it mentions: "
__A = "=======\n>>>>>>>\n"
__A = [
"TextEncoderConfig",
"ByteTextEncoder",
"SubwordTextEncoder",
"encoder_config",
"maybe_build_from_corpus",
"manual_dir",
]
__A = [
# (pattern, replacement)
# Order is important here for some replacements
(R"tfds\.core", R"datasets"),
(R"tf\.io\.gfile\.GFile", R"open"),
(R"tf\.([\w\d]+)", R"datasets.Value('\1')"),
(R"tfds\.features\.Text\(\)", R"datasets.Value('string')"),
(R"tfds\.features\.Text\(", R"datasets.Value('string'),"),
(R"features\s*=\s*tfds.features.FeaturesDict\(", R"features=datasets.Features("),
(R"tfds\.features\.FeaturesDict\(", R"dict("),
(R"The TensorFlow Datasets Authors", R"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
(R"tfds\.", R"datasets."),
(R"dl_manager\.manual_dir", R"self.config.data_dir"),
(R"self\.builder_config", R"self.config"),
]
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Tuple:
return ConvertCommand(args.tfds_path , args.datasets_directory )
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
@staticmethod
def _snake_case ( _UpperCAmelCase ):
lowercase__: int = parser.add_parser(
'''convert''' , help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' , )
train_parser.add_argument(
'''--tfds_path''' , type=_UpperCAmelCase , required=_UpperCAmelCase , help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' , )
train_parser.add_argument(
'''--datasets_directory''' , type=_UpperCAmelCase , required=_UpperCAmelCase , help='''Path to the HuggingFace Datasets folder.''' )
train_parser.set_defaults(func=_UpperCAmelCase )
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , *_UpperCAmelCase ):
lowercase__: List[str] = get_logger('''datasets-cli/converting''' )
lowercase__: Optional[Any] = tfds_path
lowercase__: Dict = datasets_directory
def _snake_case ( self ):
if os.path.isdir(self._tfds_path ):
lowercase__: Optional[Any] = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
lowercase__: Optional[int] = os.path.dirname(self._tfds_path )
else:
raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' )
lowercase__: int = os.path.abspath(self._datasets_directory )
self._logger.info(F"""Converting datasets from {abs_tfds_path} to {abs_datasets_path}""" )
lowercase__: Tuple = []
lowercase__: Dict = []
lowercase__: Any = {}
if os.path.isdir(self._tfds_path ):
lowercase__: Dict = os.listdir(_UpperCAmelCase )
else:
lowercase__: Dict = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(F"""Looking at file {f_name}""" )
lowercase__: Tuple = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[int] = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
if not os.path.isfile(_UpperCAmelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info('''Skipping file''' )
continue
with open(_UpperCAmelCase , encoding='''utf-8''' ) as f:
lowercase__: Tuple = f.readlines()
lowercase__: Optional[Any] = []
lowercase__: Dict = False
lowercase__: List[str] = False
lowercase__: List[Any] = []
for line in lines:
lowercase__: List[str] = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
lowercase__: Optional[int] = '''import datasets\n'''
elif "import tensorflow" in out_line:
# order is important here
lowercase__: Dict = ''''''
continue
elif "from absl import logging" in out_line:
lowercase__: Tuple = '''from datasets import logging\n'''
elif "getLogger" in out_line:
lowercase__: Optional[Any] = out_line.replace('''getLogger''' , '''get_logger''' )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
lowercase__: Any = True
lowercase__: str = list(filter(lambda _UpperCAmelCase : e in out_line , _UpperCAmelCase ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(_UpperCAmelCase ) + '''\n''' )
out_lines.append(_UpperCAmelCase )
out_lines.append(_UpperCAmelCase )
continue
else:
for pattern, replacement in TO_CONVERT:
lowercase__: List[Any] = re.sub(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
lowercase__: Any = re.match(r'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' , _UpperCAmelCase )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) )
lowercase__: List[str] = '''from . import ''' + match.group(1 )
                # Check we have not forgotten anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(F"""Error converting {out_line.strip()}""" )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
lowercase__: Optional[Any] = True
out_lines.append(_UpperCAmelCase )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
lowercase__: Dict = f_name.replace('''.py''' , '''''' )
lowercase__: Dict = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[Any] = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
self._logger.info(F"""Adding directory {output_dir}""" )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(_UpperCAmelCase )
if needs_manual_update:
with_manual_update.append(_UpperCAmelCase )
with open(_UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
f.writelines(_UpperCAmelCase )
self._logger.info(F"""Converted in {output_file}""" )
        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file )
                dest_folder = imports_to_builder_map[f_name.replace('''.py''' , '''''' )]
                self._logger.info(F"""Moving {utils_file} to {dest_folder}""" )
                shutil.copy(utils_file , dest_folder )
except KeyError:
self._logger.error(F"""Cannot find destination folder for {utils_file}. Please copy manually.""" )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
F"""You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.""" )
| 2 | 0 |
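# Minimal sketch of the pattern-table rewrite loop used by the converter above:
# apply each (pattern, replacement) pair in order with re.sub.
import re

CONVERT_TABLE = [
    (r"tfds\.core", r"datasets"),
    (r"tf\.io\.gfile\.GFile", r"open"),
]
line = "with tf.io.gfile.GFile(path) as f:  # a tfds.core helper"
for pattern, replacement in CONVERT_TABLE:
    line = re.sub(pattern, replacement, line)
print(line)  # -> with open(path) as f:  # a datasets helper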
from collections import Counter
from timeit import timeit
def __lowerCamelCase ( lowerCamelCase__ = "" , ):
"""simple docstring"""
return sum(c % 2 for c in Counter(input_str.replace(" " , "" ).lower() ).values() ) < 2
def __lowerCamelCase ( lowerCamelCase__ = "" ):
"""simple docstring"""
if len(lowerCamelCase__ ) == 0:
return True
lowercase__ : Any = input_str.replace(" " , "" ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
lowercase__ : dict[str, int] = {}
for character in lower_case_input_str:
lowercase__ : List[Any] = character_freq_dict.get(lowerCamelCase__ , 0 ) + 1
lowercase__ : str = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def __lowerCamelCase ( lowerCamelCase__ = "" ):
"""simple docstring"""
print("\nFor string = " , lowerCamelCase__ , ":" )
print(
"> can_string_be_rearranged_as_palindrome_counter()" , "\tans =" , can_string_be_rearranged_as_palindrome_counter(lowerCamelCase__ ) , "\ttime =" , timeit(
"z.can_string_be_rearranged_as_palindrome_counter(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
print(
"> can_string_be_rearranged_as_palindrome()" , "\tans =" , can_string_be_rearranged_as_palindrome(lowerCamelCase__ ) , "\ttime =" , timeit(
"z.can_string_be_rearranged_as_palindrome(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
if __name__ == "__main__":
    check_str = input(
        '''Enter string to determine if it can be rearranged as a palindrome or not: '''
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f'''{check_str} can {"" if status else "not "}be rearranged as a palindrome''')
| 130 |
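# Usage sketch: a string can be rearranged into a palindrome iff at most one
# character has an odd count, which is exactly what the Counter one-liner checks.
from collections import Counter

for s in ("Taco cat", "kayak", "python"):
    odd = sum(c % 2 for c in Counter(s.replace(" ", "").lower()).values())
    print(s, odd < 2)  # True, True, False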
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
EXPECTED_MISSING_KEYS = ['''model.decoder.embed_positions.weights''']
def rename_keys( name ):
    """simple docstring"""
    if "emb" in name:
        name = name.replace("emb" , "model.decoder.embed_tokens" )
    if "transformer" in name:
        name = name.replace("transformer" , "model.decoder" )
    if "cross_attention" in name:
        name = name.replace("cross_attention" , "encoder_attn" )
    if "linear1" in name:
        name = name.replace("linear1" , "fc1" )
    if "linear2" in name:
        name = name.replace("linear2" , "fc2" )
    if "norm1" in name:
        name = name.replace("norm1" , "self_attn_layer_norm" )
    if "norm_cross" in name:
        name = name.replace("norm_cross" , "encoder_attn_layer_norm" )
    if "norm2" in name:
        name = name.replace("norm2" , "final_layer_norm" )
    if "out_norm" in name:
        name = name.replace("out_norm" , "model.decoder.layer_norm" )
    if "linears" in name:
        name = name.replace("linears" , "lm_heads" )
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj" )
    return name
def rename_state_dict( state_dict , hidden_size ):
    """simple docstring"""
    keys = list(state_dict.keys() )
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key )
        key = rename_keys(key )
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight" , "q_proj.weight" )] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight" , "k_proj.weight" )] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight" , "v_proj.weight" )] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj." ) :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint( checkpoint ):
    """simple docstring"""
    if checkpoint == "small":
        # default config values
        hidden_size = 1_024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1_536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2_048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(F"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" )
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size , ffn_dim=hidden_size * 4 , num_hidden_layers=num_hidden_layers , num_attention_heads=num_attention_heads , )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint( checkpoint , pytorch_dump_folder=None , repo_id=None , device="cpu" ):
    """simple docstring"""
    fairseq_model = MusicGen.get_pretrained(checkpoint , device=device )
    decoder_config = decoder_config_from_checkpoint(checkpoint )
    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict , enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict , hidden_size=decoder_config.hidden_size )
    text_encoder = TaEncoderModel.from_pretrained("t5-base" )
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz" )
    decoder = MusicgenForCausalLM(decoder_config ).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys , unexpected_keys = decoder.load_state_dict(decoder_state_dict , strict=False )
    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key )
    if len(missing_keys ) > 0:
        raise ValueError(F"""Missing key(s) in state_dict: {missing_keys}""" )
    if len(unexpected_keys ) > 0:
        raise ValueError(F"""Unexpected key(s) in state_dict: {unexpected_keys}""" )
    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder , audio_encoder=audio_encoder , decoder=decoder )
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict )
    # check we can do a forward pass
    input_ids = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
    decoder_input_ids = input_ids.reshape(2 * 4 , -1 )
    with torch.no_grad():
        logits = model(input_ids=input_ids , decoder_input_ids=decoder_input_ids ).logits
    if logits.shape != (8, 1, 2_048):
        raise ValueError("Incorrect shape for logits" )
    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base" )
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" , padding_side="left" )
    processor = MusicgenProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2_048
    model.generation_config.pad_token_id = 2_048
    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate )
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0
    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder ).mkdir(exist_ok=True )
        logger.info(F"""Saving model {checkpoint} to {pytorch_dump_folder}""" )
        model.save_pretrained(pytorch_dump_folder )
        processor.save_pretrained(pytorch_dump_folder )
    if repo_id:
        logger.info(F"""Pushing model {checkpoint} to {repo_id}""" )
        model.push_to_hub(repo_id )
        processor.push_to_hub(repo_id )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint''',
default='''small''',
type=str,
help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''',
)
parser.add_argument(
'''--pytorch_dump_folder''',
required=True,
default=None,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
parser.add_argument(
'''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.'''
)
lowerCAmelCase__ = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 130 | 1 |
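# Hedged sketch of the fused-QKV split performed in rename_state_dict() above:
# a (3 * hidden, hidden) in_proj weight is cut into equal q/k/v blocks.
import torch

hidden_size = 4
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
q = in_proj_weight[:hidden_size, :]
k = in_proj_weight[hidden_size : 2 * hidden_size, :]
v = in_proj_weight[-hidden_size:, :]
print(q.shape, k.shape, v.shape)  # three (4, 4) blocks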
from __future__ import annotations
from typing import Any
class a__:
    def __init__( self , row : int , column : int , default_value : float = 0 ):
        self.row , self.column = row, column
        self.array = [[default_value for c in range(column )] for r in range(row )]
    def __str__( self ):
        s = F"""Matrix consist of {self.row} rows and {self.column} columns\n"""
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length , len(str(obj ) ) )
        string_format_identifier = F"""%{max_element_length}s"""
        # Make string and return
        def single_line(row_vector : list[float] ) -> str:
            nonlocal string_format_identifier
            line = '['
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
            line += "]"
            return line
        s += "\n".join(single_line(row_vector ) for row_vector in self.array )
        return s
def __repr__( self : Any ):
return str(self )
def lowercase_ ( self : List[Any] , __snake_case : tuple[int, int] ):
if not (isinstance(__snake_case , (list, tuple) ) and len(__snake_case ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self : Union[str, Any] , __snake_case : tuple[int, int] ):
assert self.validate_indicies(__snake_case )
return self.array[loc[0]][loc[1]]
def __setitem__( self : List[str] , __snake_case : tuple[int, int] , __snake_case : float ):
assert self.validate_indicies(__snake_case )
a : Dict = value
def __add__( self : List[str] , __snake_case : Matrix ):
assert isinstance(__snake_case , __snake_case )
assert self.row == another.row and self.column == another.column
# Add
a : Tuple = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
a : Optional[Any] = self[r, c] + another[r, c]
return result
def __neg__( self : Any ):
a : Any = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
a : List[str] = -self[r, c]
return result
def __sub__( self : Optional[Any] , __snake_case : Matrix ):
return self + (-another)
def __mul__( self : List[Any] , __snake_case : int | float | Matrix ):
if isinstance(__snake_case , (int, float) ): # Scalar multiplication
a : List[str] = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
a : List[Any] = self[r, c] * another
return result
elif isinstance(__snake_case , __snake_case ): # Matrix multiplication
assert self.column == another.row
a : List[Any] = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
a : Optional[Any] = F"""Unsupported type given for another ({type(__snake_case )})"""
raise TypeError(__snake_case )
def lowercase_ ( self : Any ):
a : Tuple = Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
a : List[Any] = self[r, c]
return result
def lowercase_ ( self : Any , __snake_case : Matrix , __snake_case : Matrix ):
assert isinstance(__snake_case , __snake_case ) and isinstance(__snake_case , __snake_case )
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
return None # It's not invertable
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
    def testa( ) -> None:
        # a^(-1)
        ainv = Matrix(3 , 3 , 0 )
        for i in range(3 ):
            ainv[i, i] = 1
        print(f"""a^(-1) is {ainv}""" )
        # u, v
        u = Matrix(3 , 1 , 0 )
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3 , 1 , 0 )
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"""u is {u}""" )
        print(f"""v is {v}""" )
        print(f"""uv^T is {u * v.transpose()}""" )
        # Sherman Morrison
        print(f"""(a + uv^T)^(-1) is {ainv.sherman_morrison(u , v )}""" )
def lowerCamelCase__ ( ):
import doctest
doctest.testmod()
testa() | 353 |
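# Independent numpy check of the Sherman-Morrison identity implemented above:
# (A + u v^T)^-1 == A^-1 - (A^-1 u v^T A^-1) / (1 + v^T A^-1 u).
import numpy as np

A = np.eye(3)
u = np.array([[1.0], [2.0], [-3.0]])
v = np.array([[4.0], [-2.0], [5.0]])
ainv = np.linalg.inv(A)
sm = ainv - (ainv @ u @ v.T @ ainv) / (1.0 + float(v.T @ ainv @ u))
print(np.allclose(sm, np.linalg.inv(A + u @ v.T)))  # True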
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase: Any = logging.get_logger(__name__)
class a__( lowerCamelCase__ ):
lowercase__ = """encoder-decoder"""
lowercase__ = True
def __init__( self : Dict , **__snake_case : Union[str, Any] ):
super().__init__(**__snake_case )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop('encoder' )
        encoder_model_type = encoder_config.pop('model_type' )
        decoder_config = kwargs.pop('decoder' )
        decoder_model_type = decoder_config.pop('model_type' )
        from ..auto.configuration_auto import AutoConfig
        self.encoder = AutoConfig.for_model(encoder_model_type , **encoder_config )
        self.decoder = AutoConfig.for_model(decoder_model_type , **decoder_config )
        self.is_encoder_decoder = True
@classmethod
def lowercase_ ( cls : int , __snake_case : PretrainedConfig , __snake_case : PretrainedConfig , **__snake_case : Union[str, Any] ):
logger.info('Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config' )
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__snake_case )
def lowercase_ ( self : List[Any] ):
        output = copy.deepcopy(self.__dict__ )
        output['encoder'] = self.encoder.to_dict()
        output['decoder'] = self.decoder.to_dict()
        output['model_type'] = self.__class__.model_type
return output | 96 | 0 |
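# Hedged usage sketch (assumes `transformers` is installed; the sub-config sizes
# are arbitrary): composing a config via from_encoder_decoder_configs above.
from transformers import BertConfig, EncoderDecoderConfig

encoder = BertConfig(hidden_size=128, num_hidden_layers=2)
decoder = BertConfig(hidden_size=128, num_hidden_layers=2)
config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder, decoder)
print(config.decoder.is_decoder, config.decoder.add_cross_attention)  # True True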
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
__lowerCamelCase = logging.getLogger(__name__)
class UpperCAmelCase ( A_ ):
A__ : int = "sequence-classification"
def __init__(self : List[str] , snake_case__ : str ) -> Dict:
'''simple docstring'''
if type(snake_case__ ) == dict:
snake_case : Dict = Namespace(**snake_case__ )
snake_case : Dict = glue_output_modes[hparams.task]
snake_case : Union[str, Any] = glue_tasks_num_labels[hparams.task]
super().__init__(snake_case__ , snake_case__ , self.mode )
def _SCREAMING_SNAKE_CASE (self : Tuple , **snake_case__ : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
return self.model(**snake_case__ )
    def training_step(self , batch , batch_idx ):
        '''simple docstring'''
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
        outputs = self(**inputs )
        loss = outputs[0]
        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}
def _SCREAMING_SNAKE_CASE (self : Dict ) -> Optional[int]:
'''simple docstring'''
snake_case : Optional[Any] = self.hparams
snake_case : Union[str, Any] = processors[args.task]()
snake_case : Union[str, Any] = processor.get_labels()
for mode in ["train", "dev"]:
snake_case : str = self._feature_file(snake_case__ )
if os.path.exists(snake_case__ ) and not args.overwrite_cache:
logger.info("Loading features from cached file %s" , snake_case__ )
else:
logger.info("Creating features from dataset file at %s" , args.data_dir )
snake_case : Union[str, Any] = (
processor.get_dev_examples(args.data_dir )
if mode == "dev"
else processor.get_train_examples(args.data_dir )
)
snake_case : Optional[int] = convert_examples_to_features(
snake_case__ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info("Saving features into cached file %s" , snake_case__ )
torch.save(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Dict , snake_case__ : str , snake_case__ : int , snake_case__ : bool = False ) -> DataLoader:
'''simple docstring'''
snake_case : Dict = "dev" if mode == "test" else mode
snake_case : Any = self._feature_file(snake_case__ )
logger.info("Loading features from cached file %s" , snake_case__ )
snake_case : Any = torch.load(snake_case__ )
snake_case : Any = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
snake_case : str = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
snake_case : int = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
snake_case : List[str] = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
snake_case : Dict = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) , batch_size=snake_case__ , shuffle=snake_case__ , )
    def validation_step(self , batch , batch_idx ):
        '''simple docstring'''
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
        outputs = self(**inputs )
        tmp_eval_loss , logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def _eval_end(self , outputs ) -> tuple:
        '''simple docstring'''
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs] ).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs] , axis=0 )
        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds , axis=1 )
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds )
        out_label_ids = np.concatenate([x["target"] for x in outputs] , axis=0 )
        out_label_list = [[] for _ in range(out_label_ids.shape[0] )]
        preds_list = [[] for _ in range(out_label_ids.shape[0] )]
        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task , preds , out_label_ids )}
        ret = dict(results.items() )
        ret["log"] = results
        return ret, preds_list, out_label_list
    def validation_epoch_end(self , outputs ) -> dict:
        '''simple docstring'''
        ret , preds , targets = self._eval_end(outputs )
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    def test_epoch_end(self , outputs ) -> dict:
        '''simple docstring'''
        ret , predictions , targets = self._eval_end(outputs )
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def _SCREAMING_SNAKE_CASE (snake_case__ : List[Any] , snake_case__ : List[str] ) -> str:
'''simple docstring'''
BaseTransformer.add_model_specific_args(snake_case__ , snake_case__ )
parser.add_argument(
"--max_seq_length" , default=1_28 , type=snake_case__ , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--task" , default="" , type=snake_case__ , required=snake_case__ , help="The GLUE task to run" , )
parser.add_argument(
"--gpus" , default=0 , type=snake_case__ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
return parser
def UpperCamelCase ( ):
snake_case : int = argparse.ArgumentParser()
add_generic_args(__lowerCamelCase , os.getcwd() )
snake_case : Tuple = GLUETransformer.add_model_specific_args(__lowerCamelCase , os.getcwd() )
snake_case : Dict = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
snake_case : int = os.path.join(
"./results" , f"""{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}""" , )
os.makedirs(args.output_dir )
snake_case : Optional[int] = GLUETransformer(__lowerCamelCase )
snake_case : str = generic_train(__lowerCamelCase , __lowerCamelCase )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
snake_case : str = sorted(glob.glob(os.path.join(args.output_dir , "checkpoint-epoch=*.ckpt" ) , recursive=__lowerCamelCase ) )
snake_case : List[str] = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(__lowerCamelCase )
if __name__ == "__main__":
main()
| 59 |
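# Sketch of the classification-vs-regression branch in _eval_end() above:
# logits are argmax'ed for classification and squeezed for regression.
import numpy as np

logits = np.array([[0.2, 0.8], [0.9, 0.1]])
preds_classification = np.argmax(logits, axis=1)         # -> [1, 0]
preds_regression = np.squeeze(np.array([[0.3], [0.7]]))  # -> [0.3, 0.7]
print(preds_classification, preds_regression)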
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
    MTaTokenizer = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
    MTaTokenizerFast = TaTokenizerFast
_import_structure = {"""configuration_mt5""": ["""MT5Config""", """MT5OnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mt5"] = [
"""MT5EncoderModel""",
"""MT5ForConditionalGeneration""",
"""MT5ForQuestionAnswering""",
"""MT5Model""",
"""MT5PreTrainedModel""",
"""MT5Stack""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["""TFMT5EncoderModel""", """TFMT5ForConditionalGeneration""", """TFMT5Model"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["""FlaxMT5EncoderModel""", """FlaxMT5ForConditionalGeneration""", """FlaxMT5Model"""]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
    sys.modules[__name__] = _LazyModule(
__name__,
globals()["""__file__"""],
_import_structure,
extra_objects={"""MT5Tokenizer""": MTaTokenizer, """MT5TokenizerFast""": MTaTokenizerFast},
module_spec=__spec__,
)
| 59 | 1 |
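# Minimal standalone sketch of the lazy-import pattern behind _LazyModule above:
# defer the real import until an attribute is first requested ("json" is just a
# stand-in module for the demo).
import importlib

class LazyModule:
    def __init__(self, name: str) -> None:
        self._name = name
        self._module = None

    def __getattr__(self, attr: str):
        if self._module is None:
            self._module = importlib.import_module(self._name)
        return getattr(self._module, attr)

lazy_json = LazyModule("json")
print(lazy_json.dumps({"lazy": True}))  # import happens here, on first access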
def lowerCAmelCase( principal , rate_per_annum , years_to_repay ):
    if principal <= 0:
        raise Exception('Principal borrowed must be > 0' )
    if rate_per_annum < 0:
        raise Exception('Rate of interest must be >= 0' )
    if years_to_repay <= 0 or not isinstance(years_to_repay , int ):
        raise Exception('Years to repay must be an integer > 0' )
    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12
    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
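# Quick numerical check of the EMI formula above:
# EMI = P * r * (1 + r)^n / ((1 + r)^n - 1), with monthly rate r and n payments.
principal, rate_per_annum, years_to_repay = 25_000, 0.12, 3
r = rate_per_annum / 12
n = years_to_repay * 12
emi = principal * r * (1 + r) ** n / ((1 + r) ** n - 1)
print(round(emi, 2))  # fixed monthly payment over 36 months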
| 197 | import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class a__ ( unittest.TestCase ):
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase ) -> Optional[Any]:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
__a = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
__a = 'sshleifer/tiny-gpt2'
__a = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=UpperCAmelCase , multi_process=UpperCAmelCase , )
__a = TensorFlowBenchmark(UpperCAmelCase )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
__a = 'sgugger/tiny-distilbert-classification'
__a = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , only_pretrain_model=UpperCAmelCase , )
__a = TensorFlowBenchmark(UpperCAmelCase )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
__a = 'sshleifer/tiny-gpt2'
__a = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , )
__a = TensorFlowBenchmark(UpperCAmelCase )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
__a = 'sshleifer/tiny-gpt2'
__a = AutoConfig.from_pretrained(UpperCAmelCase )
__a = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=UpperCAmelCase , multi_process=UpperCAmelCase , )
__a = TensorFlowBenchmark(UpperCAmelCase , [config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
__a = 'sshleifer/tiny-gpt2'
__a = AutoConfig.from_pretrained(UpperCAmelCase )
__a = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , )
__a = TensorFlowBenchmark(UpperCAmelCase , [config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
__a = 'sshleifer/tiny-gpt2'
__a = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , )
__a = TensorFlowBenchmark(UpperCAmelCase )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
__a = 'sshleifer/tiny-gpt2'
__a = AutoConfig.from_pretrained(UpperCAmelCase )
__a = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , )
__a = TensorFlowBenchmark(UpperCAmelCase , [config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
__a = 'patrickvonplaten/t5-tiny-random'
__a = AutoConfig.from_pretrained(UpperCAmelCase )
__a = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , )
__a = TensorFlowBenchmark(UpperCAmelCase , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 , 'Cannot do xla on CPU.' )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
__a = 'sshleifer/tiny-gpt2'
__a = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , use_xla=UpperCAmelCase , multi_process=UpperCAmelCase , )
__a = TensorFlowBenchmark(UpperCAmelCase )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
__a = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
__a = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=UpperCAmelCase , save_to_csv=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(UpperCAmelCase , 'inf_time.csv' ) , inference_memory_csv_file=os.path.join(UpperCAmelCase , 'inf_mem.csv' ) , env_info_csv_file=os.path.join(UpperCAmelCase , 'env.csv' ) , multi_process=UpperCAmelCase , )
__a = TensorFlowBenchmark(UpperCAmelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(UpperCAmelCase , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(UpperCAmelCase , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(UpperCAmelCase , 'env.csv' ) ).exists() )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
__a = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(UpperCAmelCase ):
self.assertTrue(hasattr(UpperCAmelCase , 'sequential' ) )
self.assertTrue(hasattr(UpperCAmelCase , 'cumulative' ) )
self.assertTrue(hasattr(UpperCAmelCase , 'current' ) )
self.assertTrue(hasattr(UpperCAmelCase , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__a = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(UpperCAmelCase , 'log.txt' ) , log_print=UpperCAmelCase , trace_memory_line_by_line=UpperCAmelCase , eager_mode=UpperCAmelCase , multi_process=UpperCAmelCase , )
__a = TensorFlowBenchmark(UpperCAmelCase )
__a = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(UpperCAmelCase , 'log.txt' ) ).exists() )
| 197 | 1 |
"""simple docstring"""
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
lowerCAmelCase__ = Mapping[str, np.ndarray]
lowerCAmelCase__ = Mapping[str, Any] # Is a nested dict.
lowerCAmelCase__ = 0.01
@dataclasses.dataclass(frozen=_lowercase )
class _lowerCamelCase :
UpperCAmelCase_ = 42 # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
UpperCAmelCase_ = 42 # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
UpperCAmelCase_ = 42 # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
UpperCAmelCase_ = 42 # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
UpperCAmelCase_ = 42 # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
UpperCAmelCase_ = None
# Optional remark about the protein. Included as a comment in output PDB
# files
UpperCAmelCase_ = None
# Templates used to generate this protein (prediction-only)
UpperCAmelCase_ = None
# Chain corresponding to each parent
UpperCAmelCase_ = None
def a__ ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase = r"(\[[A-Z]+\]\n)"
UpperCamelCase = [tag.strip() for tag in re.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) > 0]
UpperCamelCase = zip(tags[0::2] , [l.split("\n" ) for l in tags[1::2]] )
UpperCamelCase = ["N", "CA", "C"]
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
for g in groups:
if "[PRIMARY]" == g[0]:
UpperCamelCase = g[1][0].strip()
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
if seq[i] not in residue_constants.restypes:
UpperCamelCase = "X" # FIXME: strings are immutable
UpperCamelCase = np.array(
[residue_constants.restype_order.get(_SCREAMING_SNAKE_CASE , residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
UpperCamelCase = []
for axis in range(3 ):
tertiary.append(list(map(_SCREAMING_SNAKE_CASE , g[1][axis].split() ) ) )
UpperCamelCase = np.array(_SCREAMING_SNAKE_CASE )
UpperCamelCase = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
for i, atom in enumerate(_SCREAMING_SNAKE_CASE ):
UpperCamelCase = np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
UpperCamelCase = np.array(list(map({"-": 0, "+": 1}.get , g[1][0].strip() ) ) )
UpperCamelCase = np.zeros(
(
len(_SCREAMING_SNAKE_CASE ),
residue_constants.atom_type_num,
) ).astype(np.floataa )
for i, atom in enumerate(_SCREAMING_SNAKE_CASE ):
UpperCamelCase = 1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=_SCREAMING_SNAKE_CASE , atom_mask=_SCREAMING_SNAKE_CASE , aatype=_SCREAMING_SNAKE_CASE , residue_index=np.arange(len(_SCREAMING_SNAKE_CASE ) ) , b_factors=_SCREAMING_SNAKE_CASE , )
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0 ):
"""simple docstring"""
UpperCamelCase = []
UpperCamelCase = prot.remark
if remark is not None:
pdb_headers.append(F"REMARK {remark}" )
UpperCamelCase = prot.parents
UpperCamelCase = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
UpperCamelCase = [p for i, p in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if i == chain_id]
if parents is None or len(_SCREAMING_SNAKE_CASE ) == 0:
UpperCamelCase = ["N/A"]
pdb_headers.append(F"PARENT {' '.join(_SCREAMING_SNAKE_CASE )}" )
return pdb_headers
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase = []
UpperCamelCase = pdb_str.split("\n" )
UpperCamelCase = prot.remark
if remark is not None:
out_pdb_lines.append(F"REMARK {remark}" )
UpperCamelCase = 42
if prot.parents is not None and len(prot.parents ) > 0:
UpperCamelCase = []
if prot.parents_chain_index is not None:
UpperCamelCase = {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(_SCREAMING_SNAKE_CASE ) , [] )
parent_dict[str(_SCREAMING_SNAKE_CASE )].append(_SCREAMING_SNAKE_CASE )
UpperCamelCase = max([int(_SCREAMING_SNAKE_CASE ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
UpperCamelCase = parent_dict.get(str(_SCREAMING_SNAKE_CASE ) , ["N/A"] )
parents_per_chain.append(_SCREAMING_SNAKE_CASE )
else:
parents_per_chain.append(list(prot.parents ) )
else:
UpperCamelCase = [["N/A"]]
def make_parent_line(_SCREAMING_SNAKE_CASE ) -> str:
return F"PARENT {' '.join(_SCREAMING_SNAKE_CASE )}"
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
UpperCamelCase = 0
for i, l in enumerate(_SCREAMING_SNAKE_CASE ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(_SCREAMING_SNAKE_CASE )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(_SCREAMING_SNAKE_CASE ):
UpperCamelCase = parents_per_chain[chain_counter]
else:
UpperCamelCase = ["N/A"]
out_pdb_lines.append(make_parent_line(_SCREAMING_SNAKE_CASE ) )
return "\n".join(_SCREAMING_SNAKE_CASE )
def a__ ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase = residue_constants.restypes + ["X"]
def res_atoa(_SCREAMING_SNAKE_CASE ) -> str:
return residue_constants.restype_atoa.get(restypes[r] , "UNK" )
UpperCamelCase = residue_constants.atom_types
UpperCamelCase = []
UpperCamelCase = prot.atom_mask
UpperCamelCase = prot.aatype
UpperCamelCase = prot.atom_positions
UpperCamelCase = prot.residue_index.astype(np.intaa )
UpperCamelCase = prot.b_factors
UpperCamelCase = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError("Invalid aatypes." )
UpperCamelCase = get_pdb_headers(_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0:
pdb_lines.extend(_SCREAMING_SNAKE_CASE )
UpperCamelCase = aatype.shape[0]
UpperCamelCase = 1
UpperCamelCase = 0
UpperCamelCase = string.ascii_uppercase
UpperCamelCase = None
# Add all atom sites.
for i in range(_SCREAMING_SNAKE_CASE ):
UpperCamelCase = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(_SCREAMING_SNAKE_CASE , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
UpperCamelCase = "ATOM"
UpperCamelCase = atom_name if len(_SCREAMING_SNAKE_CASE ) == 4 else F" {atom_name}"
UpperCamelCase = ""
UpperCamelCase = ""
UpperCamelCase = 1.00
UpperCamelCase = atom_name[0] # Protein supports only C, N, O, S, this works.
UpperCamelCase = ""
UpperCamelCase = "A"
if chain_index is not None:
UpperCamelCase = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
UpperCamelCase = (
F"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
F"{res_name_a:>3} {chain_tag:>1}"
F"{residue_index[i]:>4}{insertion_code:>1} "
F"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
F"{occupancy:>6.2f}{b_factor:>6.2f} "
F"{element:>2}{charge:>2}"
)
pdb_lines.append(_SCREAMING_SNAKE_CASE )
atom_index += 1
UpperCamelCase = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
UpperCamelCase = True
UpperCamelCase = chain_index[i + 1]
if should_terminate:
# Close the chain.
UpperCamelCase = "TER"
UpperCamelCase = (
F"{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}"
)
pdb_lines.append(_SCREAMING_SNAKE_CASE )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
pdb_lines.append("END" )
pdb_lines.append("" )
return "\n".join(_SCREAMING_SNAKE_CASE )
def a__ ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , ):
"""simple docstring"""
return Protein(
aatype=features["aatype"] , atom_positions=result["final_atom_positions"] , atom_mask=result["final_atom_mask"] , residue_index=features["residue_index"] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"] ) , chain_index=_SCREAMING_SNAKE_CASE , remark=_SCREAMING_SNAKE_CASE , parents=_SCREAMING_SNAKE_CASE , parents_chain_index=_SCREAMING_SNAKE_CASE , )
| 153 |
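# Hedged standalone sketch of the fixed-width PDB ATOM record built in to_pdb()
# above; the field widths mirror the snippet's f-string, and the atom values are
# made up for the demo.
record_type, atom_index, name, alt_loc = "ATOM", 1, " CA ", ""
res_name, chain_tag, residue_index, insertion_code = "ALA", "A", 1, ""
x, y, z, occupancy, b_factor, element, charge = 1.234, 5.678, 9.012, 1.00, 0.00, "C", ""
atom_line = (
    f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
    f"{res_name:>3} {chain_tag:>1}"
    f"{residue_index:>4}{insertion_code:>1}   "
    f"{x:>8.3f}{y:>8.3f}{z:>8.3f}"
    f"{occupancy:>6.2f}{b_factor:>6.2f}          "
    f"{element:>2}{charge:>2}"
)
print(repr(atom_line))  # every column is position-significant in PDB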
"""simple docstring"""
def solution( n = 1_000 ):
    """simple docstring"""
    return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) )
if __name__ == "__main__":
print(solution())
| 153 | 1 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__a = logging.get_logger(__name__)
__a = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
__a = {
'tokenizer_file': {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json',
},
}
__a = {
'gpt-neox-20b': 2_048,
}
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = ['''input_ids''', '''attention_mask''']
    def __init__( self ,vocab_file=None ,merges_file=None ,tokenizer_file=None ,unk_token="<|endoftext|>" ,bos_token="<|endoftext|>" ,eos_token="<|endoftext|>" ,add_prefix_space=False ,**kwargs ,):
        super().__init__(
            vocab_file ,merges_file ,tokenizer_file=tokenizer_file ,unk_token=unk_token ,bos_token=bos_token ,eos_token=eos_token ,add_prefix_space=add_prefix_space ,**kwargs ,)
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' ,add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers ,pre_tok_state.pop('''type''' ) )
            pre_tok_state['''add_prefix_space'''] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
    def save_vocabulary( self ,save_directory ,filename_prefix = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory ,name=filename_prefix )
        return tuple(files )
    def _build_conversation_input_ids( self ,conversation ) -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text ,add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids | 235 |
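# Sketch of the left-truncation step in the conversation builder above: keep
# only the most recent model_max_length tokens of the running history.
model_max_length = 8
input_ids = list(range(12))
if len(input_ids) > model_max_length:
    input_ids = input_ids[-model_max_length:]
print(input_ids)  # [4, 5, 6, 7, 8, 9, 10, 11]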
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class __a( unittest.TestCase ):
"""simple docstring"""
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : Optional[int] = tempfile.mkdtemp()
UpperCAmelCase_ : str = BlipImageProcessor()
UpperCAmelCase_ : Dict = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' )
UpperCAmelCase_ : Optional[Any] = BlipaProcessor(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
processor.save_pretrained(self.tmpdirname )
def a__ ( self ,**_SCREAMING_SNAKE_CASE ) -> Optional[int]:
return AutoProcessor.from_pretrained(self.tmpdirname ,**_SCREAMING_SNAKE_CASE ).tokenizer
def a__ ( self ,**_SCREAMING_SNAKE_CASE ) -> Dict:
return AutoProcessor.from_pretrained(self.tmpdirname ,**_SCREAMING_SNAKE_CASE ).image_processor
def a__ ( self ) -> List[Any]:
shutil.rmtree(self.tmpdirname )
def a__ ( self ) -> Tuple:
UpperCAmelCase_ : Tuple = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )]
UpperCAmelCase_ : Optional[Any] = [Image.fromarray(np.moveaxis(_SCREAMING_SNAKE_CASE ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : Dict = BlipaProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : str = self.get_tokenizer(bos_token='''(BOS)''' ,eos_token='''(EOS)''' )
UpperCAmelCase_ : int = self.get_image_processor(do_normalize=_SCREAMING_SNAKE_CASE ,padding_value=1.0 )
UpperCAmelCase_ : Union[str, Any] = BlipaProcessor.from_pretrained(
self.tmpdirname ,bos_token='''(BOS)''' ,eos_token='''(EOS)''' ,do_normalize=_SCREAMING_SNAKE_CASE ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,_SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Any:
UpperCAmelCase_ : Dict = self.get_image_processor()
UpperCAmelCase_ : Any = self.get_tokenizer()
UpperCAmelCase_ : str = BlipaProcessor(tokenizer=_SCREAMING_SNAKE_CASE ,image_processor=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = self.prepare_image_inputs()
UpperCAmelCase_ : Optional[Any] = image_processor(_SCREAMING_SNAKE_CASE ,return_tensors='''np''' )
UpperCAmelCase_ : int = processor(images=_SCREAMING_SNAKE_CASE ,return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 )
def a__ ( self ) -> int:
UpperCAmelCase_ : str = self.get_image_processor()
UpperCAmelCase_ : List[Any] = self.get_tokenizer()
UpperCAmelCase_ : Any = BlipaProcessor(tokenizer=_SCREAMING_SNAKE_CASE ,image_processor=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = '''lower newer'''
UpperCAmelCase_ : Optional[int] = processor(text=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = tokenizer(_SCREAMING_SNAKE_CASE ,return_token_type_ids=_SCREAMING_SNAKE_CASE )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : str = self.get_image_processor()
UpperCAmelCase_ : List[Any] = self.get_tokenizer()
UpperCAmelCase_ : Tuple = BlipaProcessor(tokenizer=_SCREAMING_SNAKE_CASE ,image_processor=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = '''lower newer'''
UpperCAmelCase_ : int = self.prepare_image_inputs()
UpperCAmelCase_ : List[str] = processor(text=_SCREAMING_SNAKE_CASE ,images=_SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) ,['''pixel_values''', '''input_ids''', '''attention_mask'''] )
# test if it raises when no input is passed
with pytest.raises(_SCREAMING_SNAKE_CASE ):
processor()
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : Tuple = self.get_image_processor()
UpperCAmelCase_ : Dict = self.get_tokenizer()
UpperCAmelCase_ : List[str] = BlipaProcessor(tokenizer=_SCREAMING_SNAKE_CASE ,image_processor=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase_ : List[str] = processor.batch_decode(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> str:
UpperCAmelCase_ : Union[str, Any] = self.get_image_processor()
UpperCAmelCase_ : int = self.get_tokenizer()
UpperCAmelCase_ : Any = BlipaProcessor(tokenizer=_SCREAMING_SNAKE_CASE ,image_processor=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = '''lower newer'''
UpperCAmelCase_ : Union[str, Any] = self.prepare_image_inputs()
UpperCAmelCase_ : Any = processor(text=_SCREAMING_SNAKE_CASE ,images=_SCREAMING_SNAKE_CASE )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys() ) ,['''pixel_values''', '''input_ids''', '''attention_mask'''] )
| 235 | 1 |
deps = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
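

# A minimal sketch of how a pin table like this is consumed when assembling
# install/extras requirements (the helper name mirrors the deps_list convention
# in transformers' setup.py; treat the exact wiring as illustrative).
def deps_list(*pkgs: str) -> list:
    return [deps[pkg] for pkg in pkgs]


# e.g. deps_list("torch", "numpy") -> ["torch>=1.9,!=1.12.0", "numpy>=1.17"]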
| 9 |
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(
    sequence: list[Any], current_subsequence: list[Any], index: int
) -> None:
    if index == len(sequence):
        print(current_subsequence)
        return

    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()


if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)

    seq.clear()
    seq.extend(["A", "B", "C"])
    generate_all_subsequences(seq)
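
    # Sanity check (a small addition): a length-n sequence yields 2**n
    # subsequences, including the empty one; capture stdout to count them.
    import io
    from contextlib import redirect_stdout

    _buf = io.StringIO()
    with redirect_stdout(_buf):
        generate_all_subsequences([1, 2, 3])
    assert len(_buf.getvalue().splitlines()) == 2**3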
| 259 | 0 |
def twos_complement(number: int) -> str:
    """Return the two's complement of a negative int, as a binary string."""
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
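
    # Worked examples (quick checks standing in for the stripped doctests):
    # the width used is bit_length(|n|) + 1, so -1 -> '0b11' and -5 -> '0b1011'.
    assert twos_complement(0) == "0b0"
    assert twos_complement(-1) == "0b11"
    assert twos_complement(-5) == "0b1011"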
| 105 |
import argparse
import hashlib
import io
import os
import urllib.request
import warnings

import torch
from torch import nn
from tqdm import tqdm

from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
'''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''',
'''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''',
'''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''',
'''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''',
'''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''',
'''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''',
'''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''',
'''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''',
'''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''',
'''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''',
}
def remove_ignore_keys_(state_dict):
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)
WHISPER_MAPPING = {
'''blocks''': '''layers''',
'''mlp.0''': '''fc1''',
'''mlp.2''': '''fc2''',
'''mlp_ln''': '''final_layer_norm''',
'''.attn.query''': '''.self_attn.q_proj''',
'''.attn.key''': '''.self_attn.k_proj''',
'''.attn.value''': '''.self_attn.v_proj''',
'''.attn_ln''': '''.self_attn_layer_norm''',
'''.attn.out''': '''.self_attn.out_proj''',
'''.cross_attn.query''': '''.encoder_attn.q_proj''',
'''.cross_attn.key''': '''.encoder_attn.k_proj''',
'''.cross_attn.value''': '''.encoder_attn.v_proj''',
'''.cross_attn_ln''': '''.encoder_attn_layer_norm''',
'''.cross_attn.out''': '''.encoder_attn.out_proj''',
'''decoder.ln.''': '''decoder.layer_norm.''',
'''encoder.ln.''': '''encoder.layer_norm.''',
'''token_embedding''': '''embed_tokens''',
'''encoder.positional_embedding''': '''encoder.embed_positions.weight''',
'''decoder.positional_embedding''': '''decoder.embed_positions.weight''',
'''ln_post''': '''layer_norm''',
}
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)

        print(f"{key} -> {new_key}")

        s_dict[new_key] = s_dict.pop(key)
    return s_dict
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download(url: str, root: str = ".") -> bytes:
    # the default for `root` is assumed here so the one-argument call below works
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1_024
        ) as loop:
            while True:
                buffer = source.read(8_192)
                if not buffer:
                    break

                output.write(buffer)
                loop.update(len(buffer))

    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model."
        )

    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    if ".pt" not in checkpoint_path:
        # the downloaded blob is a pickled torch checkpoint, so deserialize it here
        original_checkpoint = torch.load(io.BytesIO(_download(_MODELS[checkpoint_path])), map_location="cpu")
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]

    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        decoder_attention_heads=dimensions["n_text_head"],
        max_source_positions=dimensions["n_audio_ctx"],
    )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to the downloaded checkpoints")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()

    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
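
    # Example invocation (script and path names illustrative):
    #   python convert_openai_to_hf.py --checkpoint_path tiny.en \
    #       --pytorch_dump_folder_path ./whisper-tiny.en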
| 105 | 1 |
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
_SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : str = {'''vocab_file''': '''vocab.txt'''}
_SCREAMING_SNAKE_CASE : str = {
'''vocab_file''': {
'''facebook/esm2_t6_8M_UR50D''': '''https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt''',
'''facebook/esm2_t12_35M_UR50D''': '''https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt''',
},
}
_SCREAMING_SNAKE_CASE : Dict = {
'''facebook/esm2_t6_8M_UR50D''': 1024,
'''facebook/esm2_t12_35M_UR50D''': 1024,
}
def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
        return [l.strip() for l in lines]
class EsmTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, unk_token="<unk>", cls_token="<cls>", pad_token="<pad>", mask_token="<mask>",
                 eos_token="<eos>", **kwargs):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )

            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=True)
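

# A minimal usage sketch (the vocab file below is illustrative, not the real
# ESM-2 vocabulary; the tokenizer only needs one token per line).
if __name__ == "__main__":
    import tempfile

    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
        f.write("\n".join(["<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "<mask>"]))
    tok = EsmTokenizer(f.name)
    print(tok.convert_tokens_to_ids(["L", "A", "G"]))  # -> [4, 5, 6]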
| 183 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
_SCREAMING_SNAKE_CASE : Union[str, Any] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''vocab_file''': {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''unc-nlp/lxmert-base-uncased''': (
'''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json'''
),
},
}
_SCREAMING_SNAKE_CASE : Any = {
'''unc-nlp/lxmert-base-uncased''': 512,
}
_SCREAMING_SNAKE_CASE : int = {
'''unc-nlp/lxmert-base-uncased''': {'''do_lower_case''': True},
}
class LxmertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]",
                 sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]",
                 tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 183 | 1 |
'''simple docstring'''
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until the interval is narrower than 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1_0_0_0))
import doctest
doctest.testmod()
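
    # A second worked example (sketch): cos(x) = x has its root (the Dottie
    # number, ~0.7390851) inside [0, 2], and the function changes sign there.
    from math import cos
    print(bisection(lambda x: cos(x) - x, 0, 2))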
| 357 |
'''simple docstring'''
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano theorem: opposite signs at a and b guarantee a root in between
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
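
    # Both calls above converge to the positive root of 10 - x**2, i.e.
    # sqrt(10) ~ 3.162, since each interval brackets exactly that sign change.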
| 332 | 0 |
'''simple docstring'''
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)

    pattern = "Lue"
    assert not rabin_karp(pattern, text)

    print("Success.")
if __name__ == "__main__":
test_rabin_karp()
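
# Average-case cost is O(len(text) + len(pattern)) thanks to the rolling hash;
# the explicit text[i : i + p_len] == pattern comparison above guards against
# hash collisions, keeping the search exact rather than probabilistic.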
| 200 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int):
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
if __name__ == "__main__":
import doctest
doctest.testmod()
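
    # Example runs (sketch): Monte Carlo accuracy improves roughly with
    # 1/sqrt(iterations), so expect ~2-3 correct digits at this sample size.
    pi_estimator(100_000)
    area_under_line_estimator_check(100_000)
    pi_estimator_using_area_under_curve(100_000)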
| 99 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __UpperCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True
def __A ( self ) -> Any:
super().setUp()
A_ = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
A_ = dict(zip(_SCREAMING_SNAKE_CASE , range(len(_SCREAMING_SNAKE_CASE ) ) ) )
A_ = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
A_ = {'''unk_token''': '''<unk>'''}
A_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
A_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_SCREAMING_SNAKE_CASE ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_SCREAMING_SNAKE_CASE ) )
def __A ( self , **_SCREAMING_SNAKE_CASE ) -> str:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def __A ( self , **_SCREAMING_SNAKE_CASE ) -> List[str]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def __A ( self , _SCREAMING_SNAKE_CASE ) -> List[Any]:
return "lower newer", "lower newer"
@cached_property
def __A ( self ) -> Optional[Any]:
return LEDTokenizer.from_pretrained('''allenai/led-base-16384''' )
@cached_property
def __A ( self ) -> Optional[int]:
return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''' )
@require_torch
def __A ( self ) -> int:
A_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
A_ = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
A_ = tokenizer(_SCREAMING_SNAKE_CASE , max_length=len(_SCREAMING_SNAKE_CASE ) , padding=_SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
A_ = batch.input_ids.tolist()[0]
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@require_torch
def __A ( self ) -> List[str]:
A_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
A_ = tokenizer(_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
self.assertIn('''input_ids''' , _SCREAMING_SNAKE_CASE )
self.assertIn('''attention_mask''' , _SCREAMING_SNAKE_CASE )
self.assertNotIn('''labels''' , _SCREAMING_SNAKE_CASE )
self.assertNotIn('''decoder_attention_mask''' , _SCREAMING_SNAKE_CASE )
@require_torch
def __A ( self ) -> Any:
A_ = [
'''Summary of the text.''',
'''Another summary.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
A_ = tokenizer(text_target=_SCREAMING_SNAKE_CASE , max_length=32 , padding='''max_length''' , return_tensors='''pt''' )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
@require_torch
def __A ( self ) -> str:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
A_ = tokenizer(
['''I am a small frog''' * 1024, '''I am a small frog'''] , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(batch.input_ids.shape , (2, 5122) )
@require_torch
def __A ( self ) -> Optional[Any]:
A_ = ['''A long paragraph for summarization.''']
A_ = [
'''Summary of the text.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
A_ = tokenizer(_SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
A_ = tokenizer(text_target=_SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
A_ = inputs['''input_ids''']
A_ = targets['''input_ids''']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def __A ( self ) -> str:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
A_ = ['''Summary of the text.''', '''Another summary.''']
A_ = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
A_ = tokenizer(_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE )
A_ = [[0] * len(_SCREAMING_SNAKE_CASE ) for x in encoded_output['''input_ids''']]
A_ = tokenizer.pad(_SCREAMING_SNAKE_CASE )
self.assertSequenceEqual(outputs['''global_attention_mask'''] , _SCREAMING_SNAKE_CASE )
def __A ( self ) -> Tuple:
pass
def __A ( self ) -> Optional[int]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
A_ = self.rust_tokenizer_class.from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
A_ = self.tokenizer_class.from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
A_ = '''A, <mask> AllenNLP sentence.'''
A_ = tokenizer_r.encode_plus(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_token_type_ids=_SCREAMING_SNAKE_CASE )
A_ = tokenizer_p.encode_plus(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_token_type_ids=_SCREAMING_SNAKE_CASE )
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
A_ = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
A_ = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
_SCREAMING_SNAKE_CASE , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
_SCREAMING_SNAKE_CASE , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
| 362 |
"""
Project Euler Problem 36: https://projecteuler.net/problem=36
Find the sum of all numbers below one million that are palindromic in
base 10 and base 2.
"""
from __future__ import annotations
def is_palindrome(n: int | str) -> bool:
    n = str(n)
    return n == n[::-1]


def solution(n: int = 1_000_000) -> int:
    total = 0
    for i in range(1, n):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
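
    # Sanity notes: 585 = 0b1001001001 is palindromic in both bases, and the
    # known answer for a limit of 1_000_000 is 872187.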
| 18 | 0 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
'''simple docstring'''
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
                 use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2,
                 num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
                 type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
def _UpperCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
a__ =ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
a__ =None
if self.use_input_mask:
a__ =random_attention_mask([self.batch_size, self.seq_length] )
a__ =None
if self.use_token_type_ids:
a__ =ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
a__ =None
a__ =None
a__ =None
if self.use_labels:
a__ =ids_tensor([self.batch_size], self.type_sequence_label_size )
a__ =ids_tensor([self.batch_size, self.seq_length], self.num_labels )
a__ =ids_tensor([self.batch_size], self.num_choices )
a__ =ConvBertConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=lowercase_, )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCAmelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_ ) -> Tuple:
"""simple docstring"""
a__ =TFConvBertModel(config=lowercase_ )
a__ ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
a__ =[input_ids, input_mask]
a__ =model(lowercase_ )
a__ =model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
a__ =TFConvBertForMaskedLM(config=lowercase_ )
a__ ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
a__ =model(lowercase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_ ) -> Optional[int]:
"""simple docstring"""
a__ =self.num_labels
a__ =TFConvBertForSequenceClassification(config=lowercase_ )
a__ ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
a__ =model(lowercase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def _UpperCAmelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_ ) -> List[Any]:
"""simple docstring"""
a__ =self.num_choices
a__ =TFConvBertForMultipleChoice(config=lowercase_ )
a__ =tf.tile(tf.expand_dims(lowercase_, 1 ), (1, self.num_choices, 1) )
a__ =tf.tile(tf.expand_dims(lowercase_, 1 ), (1, self.num_choices, 1) )
a__ =tf.tile(tf.expand_dims(lowercase_, 1 ), (1, self.num_choices, 1) )
a__ ={
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
a__ =model(lowercase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def _UpperCAmelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_ ) -> int:
"""simple docstring"""
a__ =self.num_labels
a__ =TFConvBertForTokenClassification(config=lowercase_ )
a__ ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
a__ =model(lowercase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCAmelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_ ) -> str:
"""simple docstring"""
a__ =TFConvBertForQuestionAnswering(config=lowercase_ )
a__ ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
a__ =model(lowercase_ )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def _UpperCAmelCase ( self ) -> int:
"""simple docstring"""
a__ =self.prepare_config_and_inputs()
(
(
a__
), (
a__
), (
a__
), (
a__
), (
a__
), (
a__
), (
a__
),
) =config_and_inputs
a__ ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class TFConvBertModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_pruning = False
    test_head_masking = False
    test_onnx = False
def _UpperCAmelCase ( self ) -> List[str]:
"""simple docstring"""
a__ =TFConvBertModelTester(self )
a__ =ConfigTester(self, config_class=lowercase_, hidden_size=37 )
def _UpperCAmelCase ( self ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Dict:
"""simple docstring"""
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def _UpperCAmelCase ( self ) -> str:
"""simple docstring"""
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase_ )
def _UpperCAmelCase ( self ) -> Tuple:
"""simple docstring"""
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowercase_ )
def _UpperCAmelCase ( self ) -> List[str]:
"""simple docstring"""
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase_ )
def _UpperCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowercase_ )
def _UpperCAmelCase ( self ) -> List[str]:
"""simple docstring"""
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase_ )
@slow
def _UpperCAmelCase ( self ) -> str:
"""simple docstring"""
a__, a__ =self.model_tester.prepare_config_and_inputs_for_common()
a__ =True
a__ =True
if hasattr(lowercase_, '''use_cache''' ):
a__ =True
a__ =getattr(self.model_tester, '''encoder_seq_length''', self.model_tester.seq_length )
a__ =getattr(self.model_tester, '''key_length''', lowercase_ )
for model_class in self.all_model_classes:
a__ =self._prepare_for_class(lowercase_, lowercase_ )
a__ =model_class(lowercase_ )
a__ =len(model(lowercase_ ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowercase_, saved_model=lowercase_ )
a__ =os.path.join(lowercase_, '''saved_model''', '''1''' )
a__ =tf.keras.models.load_model(lowercase_ )
a__ =model(lowercase_ )
if self.is_encoder_decoder:
a__ =outputs['''encoder_hidden_states''']
a__ =outputs['''encoder_attentions''']
else:
a__ =outputs['''hidden_states''']
a__ =outputs['''attentions''']
self.assertEqual(len(lowercase_ ), lowercase_ )
a__ =getattr(
self.model_tester, '''expected_num_hidden_layers''', self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(lowercase_ ), lowercase_ )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ), [self.model_tester.seq_length, self.model_tester.hidden_size], )
self.assertEqual(len(lowercase_ ), self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length], )
@slow
def _UpperCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
a__ =TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''' )
self.assertIsNotNone(lowercase_ )
def _UpperCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
a__, a__ =self.model_tester.prepare_config_and_inputs_for_common()
a__ =True
a__ =getattr(self.model_tester, '''decoder_seq_length''', self.model_tester.seq_length )
a__ =getattr(self.model_tester, '''encoder_seq_length''', self.model_tester.seq_length )
a__ =getattr(self.model_tester, '''key_length''', lowercase_ )
a__ =getattr(self.model_tester, '''key_length''', lowercase_ )
def check_decoder_attentions_output(lowercase_ ):
a__ =len(lowercase_ )
self.assertEqual(out_len % 2, 0 )
a__ =outputs.decoder_attentions
self.assertEqual(len(lowercase_ ), self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length], )
def check_encoder_attentions_output(lowercase_ ):
a__ =[
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(lowercase_ ), self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length], )
for model_class in self.all_model_classes:
a__ =True
a__ =False
a__ =model_class(lowercase_ )
a__ =model(self._prepare_for_class(lowercase_, lowercase_ ) )
a__ =len(lowercase_ )
self.assertEqual(config.output_hidden_states, lowercase_ )
check_encoder_attentions_output(lowercase_ )
if self.is_encoder_decoder:
a__ =model_class(lowercase_ )
a__ =model(self._prepare_for_class(lowercase_, lowercase_ ) )
self.assertEqual(config.output_hidden_states, lowercase_ )
check_decoder_attentions_output(lowercase_ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
a__ =True
a__ =model_class(lowercase_ )
a__ =model(self._prepare_for_class(lowercase_, lowercase_ ) )
self.assertEqual(config.output_hidden_states, lowercase_ )
check_encoder_attentions_output(lowercase_ )
# Check attention is always last and order is fine
a__ =True
a__ =True
a__ =model_class(lowercase_ )
a__ =model(self._prepare_for_class(lowercase_, lowercase_ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(lowercase_ ) )
self.assertEqual(model.config.output_hidden_states, lowercase_ )
check_encoder_attentions_output(lowercase_ )
@require_tf
class TFConvBertModelIntegrationTest( unittest.TestCase ):
'''simple docstring'''
@slow
def _UpperCAmelCase ( self ) -> Tuple:
"""simple docstring"""
a__ =TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''' )
a__ =tf.constant([[0, 1, 2, 3, 4, 5]] )
a__ =model(lowercase_ )[0]
a__ =[1, 6, 768]
self.assertEqual(output.shape, lowercase_ )
a__ =tf.constant(
[
[
[-0.03475493, -0.4686034, -0.30638832],
[0.22637248, -0.26988646, -0.7423424],
[0.10324868, -0.45013508, -0.58280784],
]
] )
tf.debugging.assert_near(output[:, :3, :3], lowercase_, atol=1E-4 )
| 188 |
from __future__ import annotations
def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
'''simple docstring'''
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative in a semiconductor''' )
elif hole_conc < 0:
raise ValueError('''Hole concentration cannot be negative in a semiconductor''' )
elif intrinsic_conc < 0:
raise ValueError(
'''Intrinsic concentration cannot be negative in a semiconductor''' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
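
    # Worked example (mass-action law, a small addition): with n_i = 1.5e10 and
    # p = 1e17 per cm^3, n = n_i**2 / p = 2250.0.
    print(carrier_concentration(electron_conc=0, hole_conc=1e17, intrinsic_conc=1.5e10))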
| 188 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        array[array < 0] = 0  # clamp negative logits to background
        array[array > 0] = 1  # positive logits become the foreground mask
        return Image.fromarray((array * 255).astype(np.uint8))
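

# A minimal usage sketch (assumptions: a local image file "cat.png" and network
# access so the CLIPSeg checkpoint can be downloaded on first use).
if __name__ == "__main__":
    tool = ImageSegmentationTool()
    mask = tool(Image.open("cat.png"), "cat")
    mask.save("cat_mask.png")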
| 206 |
def split(string: str, separator: str = " ") -> list:
    split_words = []

    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])

    return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
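
    # Worked examples (the original doctests were stripped in this copy, so a
    # couple of direct checks stand in for them):
    assert split("apple#banana#cherry", "#") == ["apple", "banana", "cherry"]
    assert split("Hello there") == ["Hello", "there"]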
| 206 | 1 |
"""simple docstring"""
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix, y is the target matrix
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta
# In[68]:
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1

    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
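
    # With two features and no intercept term, the learned boundary
    # theta[0]*x1 + theta[1]*x2 = 0 is exactly the 0.5-probability contour
    # drawn above, so the line passes through the origin of feature space.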
| 316 |
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    decoded = ""
    keychar: int
    cipherchar: int
    decodedchar: int

    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)

    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str

    data = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")
    ciphertext = [int(number) for number in data.strip().split(",")]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
if __name__ == "__main__":
print(f'''{solution() = }''')
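
# The key space is only 26**3 = 17,576 lowercase three-letter keys, which is why
# exhaustive search plus the common-word filter above is enough to break the cipher.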
| 316 | 1 |
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
_SCREAMING_SNAKE_CASE = HfApi()
_SCREAMING_SNAKE_CASE = {}
# fmt: off
_SCREAMING_SNAKE_CASE = torch.tensor([
-0.75_15, -1.68_83, 0.24_20, 0.03_00, 0.63_47, 1.34_33, -1.17_43, -3.74_67,
1.23_42, -2.24_85, 0.46_36, 0.80_76, -0.79_91, 0.39_69, 0.84_98, 0.91_89,
-1.88_87, -3.35_22, 0.76_39, 0.20_40, 0.62_71, -2.71_48, -1.63_16, 3.08_39,
0.31_86, 0.27_21, -0.97_59, -1.24_61, 2.62_57, 1.35_57
])
_SCREAMING_SNAKE_CASE = torch.tensor([
-2.36_39, -2.53_44, 0.00_54, -0.66_74, 1.59_90, 1.01_58, 0.31_24, -2.14_36,
1.87_95, -2.54_29, -0.15_66, -0.39_73, 1.24_90, 2.64_47, 1.22_83, -0.52_08,
-2.81_54, -3.51_19, 2.38_38, 1.20_33, 1.72_01, -2.12_56, -1.45_76, 2.79_48,
2.42_04, -0.97_52, -1.25_46, 0.80_27, 3.27_58, 3.13_65
])
_SCREAMING_SNAKE_CASE = torch.tensor([
-0.65_31, -0.68_91, -0.31_72, -0.53_75, -0.91_40, -0.53_67, -0.11_75, -0.78_69,
-0.38_08, -0.45_13, -0.20_98, -0.00_83, 0.31_83, 0.51_40, 0.22_47, -0.13_04,
-0.13_02, -0.28_02, -0.20_84, -0.20_25, -0.49_67, -0.48_73, -0.08_61, 0.69_25,
0.02_50, 0.12_90, -0.15_43, 0.63_16, 1.04_60, 1.49_43
])
_SCREAMING_SNAKE_CASE = torch.tensor([
0.09_11, 0.11_07, 0.01_82, 0.04_35, -0.08_05, -0.06_08, 0.03_81, 0.21_72,
-0.02_80, 0.13_27, -0.02_99, -0.02_55, -0.00_50, -0.11_70, -0.10_46, 0.03_09,
0.13_67, 0.17_28, -0.05_33, -0.07_48, -0.05_34, 0.16_24, 0.03_84, -0.18_05,
-0.07_07, 0.06_42, 0.02_20, -0.01_34, -0.13_33, -0.15_05
])
_SCREAMING_SNAKE_CASE = torch.tensor([
0.13_21, 0.13_37, 0.04_40, 0.06_22, -0.05_91, -0.03_70, 0.05_03, 0.21_33,
-0.01_77, 0.14_15, -0.01_16, -0.01_12, 0.00_44, -0.09_80, -0.07_89, 0.03_95,
0.15_02, 0.17_85, -0.04_88, -0.05_14, -0.04_04, 0.15_39, 0.04_54, -0.15_59,
-0.06_65, 0.06_59, 0.03_83, -0.00_05, -0.12_66, -0.13_86
])
_SCREAMING_SNAKE_CASE = torch.tensor([
0.11_54, 0.12_18, 0.03_07, 0.05_26, -0.07_11, -0.05_41, 0.03_66, 0.20_78,
-0.02_67, 0.13_17, -0.02_26, -0.01_93, -0.00_14, -0.10_55, -0.09_02, 0.03_30,
0.13_91, 0.17_09, -0.05_62, -0.06_93, -0.05_60, 0.14_82, 0.03_81, -0.16_83,
-0.06_81, 0.06_61, 0.03_31, -0.00_46, -0.12_68, -0.14_31
])
_SCREAMING_SNAKE_CASE = torch.tensor([
0.11_92, 0.12_40, 0.04_14, 0.06_06, -0.05_57, -0.04_12, 0.04_30, 0.20_42,
-0.02_00, 0.13_85, -0.01_15, -0.01_32, 0.00_17, -0.09_65, -0.08_02, 0.03_98,
0.14_33, 0.17_47, -0.04_58, -0.05_33, -0.04_07, 0.15_45, 0.04_19, -0.15_74,
-0.06_45, 0.06_26, 0.03_41, -0.00_10, -0.11_99, -0.13_90
])
_SCREAMING_SNAKE_CASE = torch.tensor([
0.10_75, 0.10_74, 0.02_05, 0.04_31, -0.07_74, -0.06_07, 0.02_98, 0.20_42,
-0.03_20, 0.12_67, -0.02_81, -0.02_50, -0.00_64, -0.10_91, -0.09_46, 0.02_90,
0.13_28, 0.16_50, -0.05_80, -0.07_38, -0.05_86, 0.14_40, 0.03_37, -0.17_46,
-0.07_12, 0.06_05, 0.02_50, -0.00_99, -0.13_16, -0.14_73
])
_SCREAMING_SNAKE_CASE = torch.tensor([
-1.45_72, -2.04_81, -0.04_14, -0.60_05, 1.41_36, 0.58_48, 0.40_28, -2.73_30,
1.22_12, -2.12_28, 0.21_55, 0.40_39, 0.76_62, 2.05_35, 0.74_77, -0.32_43,
-2.17_58, -2.76_48, 1.69_47, 0.70_26, 1.23_38, -1.60_78, -0.86_82, 2.28_10,
1.85_74, -0.57_18, -0.55_86, -0.01_86, 2.34_15, 2.12_51])
_SCREAMING_SNAKE_CASE = torch.tensor([
-1.36_90, -1.97_20, -0.40_90, -0.69_66, 1.46_60, 0.99_38, -0.13_85, -2.73_24,
0.77_36, -1.89_17, 0.29_23, 0.42_93, 0.16_93, 1.41_12, 1.18_87, -0.31_81,
-2.21_60, -2.63_81, 1.31_70, 0.81_63, 0.92_40, -1.65_44, -0.60_99, 2.52_59,
1.64_30, -0.90_90, -0.93_92, -0.01_26, 2.42_68, 2.32_66
])
_SCREAMING_SNAKE_CASE = torch.tensor([
-1.35_25, -1.96_28, -0.39_56, -0.68_60, 1.46_64, 1.00_14, -0.12_59, -2.72_12,
0.77_72, -1.88_11, 0.29_96, 0.43_88, 0.17_04, 1.40_29, 1.17_01, -0.30_27,
-2.20_53, -2.62_87, 1.33_50, 0.81_31, 0.92_74, -1.62_92, -0.60_98, 2.51_31,
1.65_05, -0.89_58, -0.92_98, -0.01_51, 2.42_57, 2.33_55
])
_SCREAMING_SNAKE_CASE = torch.tensor([
-2.05_85, -2.78_97, -0.28_50, -0.89_40, 1.90_52, 0.57_02, 0.63_45, -3.89_59,
1.59_32, -3.23_19, 0.19_74, 0.02_87, 1.75_66, 2.65_43, 0.83_87, -0.53_51,
-3.27_36, -4.33_75, 2.90_29, 1.63_90, 1.46_40, -2.17_01, -1.90_13, 2.93_41,
3.49_81, -0.62_55, -1.16_44, -0.15_91, 3.70_97, 3.20_66
])
_SCREAMING_SNAKE_CASE = torch.tensor([
-2.31_39, -2.55_94, -0.01_97, -0.67_85, 1.70_01, 1.16_06, 0.30_75, -2.17_40,
1.80_71, -2.56_30, -0.09_26, -0.38_11, 1.21_16, 2.62_46, 1.27_31, -0.53_98,
-2.81_53, -3.61_40, 2.38_93, 1.32_62, 1.62_58, -2.18_56, -1.32_67, 2.83_95,
2.37_79, -1.06_23, -1.24_68, 0.89_59, 3.33_67, 3.22_43
])
_SCREAMING_SNAKE_CASE = torch.tensor([
-2.06_28, -2.76_67, -0.20_89, -0.82_63, 2.05_39, 0.59_92, 0.64_95, -3.83_36,
1.60_25, -3.28_17, 0.17_21, -0.06_33, 1.75_16, 2.70_39, 0.81_00, -0.59_08,
-3.21_13, -4.43_43, 2.92_57, 1.36_32, 1.55_62, -2.14_89, -1.98_94, 3.05_60,
3.33_96, -0.73_28, -1.04_17, 0.03_83, 3.70_93, 3.23_43
])
_SCREAMING_SNAKE_CASE = torch.tensor([
-1.45_74, -2.05_69, -0.04_73, -0.61_17, 1.40_18, 0.57_69, 0.41_29, -2.73_44,
1.22_41, -2.13_97, 0.20_00, 0.39_37, 0.76_16, 2.04_53, 0.73_24, -0.33_91,
-2.17_46, -2.77_44, 1.69_63, 0.69_21, 1.21_87, -1.61_72, -0.88_77, 2.24_39,
1.84_71, -0.58_39, -0.56_05, -0.04_64, 2.32_50, 2.12_19
])
# fmt: on
models = api.list_models(filter="diffusers")
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]

        print(f"Started running {mod.modelId}!!!")

        if mod.modelId.startswith("CompVis"):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder="unet")
        else:
            model = UNet2DModel.from_pretrained(local_checkpoint)

        torch.manual_seed(0)
        random.seed(0)

        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample

        assert torch.allclose(
            logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
        )
        print(f"{mod.modelId} has passed successfully!!!")
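The dictionary lookup in the assert rewrites a Hub model id into a flat key by replacing both `/` and `-` with underscores. A quick check with a hypothetical model id:

model_id = "google/ddpm-cifar10-32"  # hypothetical id, for illustration only
key = "_".join("_".join(model_id.split("/")).split("-"))
assert key == "google_ddpm_cifar10_32"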
| 352 |
import collections
import os
from typing import List, Optional, Tuple

from transformers.utils import is_jieba_available, requires_backends

if is_jieba_available():
    import jieba

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}

def load_vocab(vocab_file):
    """Loads a vocabulary file into an ordered token -> index dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab

class WordpieceTokenizer:
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            # greedy longest-match-first: shrink the window until a vocab hit
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens

class CpmAntTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        # map real whitespace characters onto the special placeholder tokens
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]

        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)

    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Tokenize a string: jieba segmentation, then wordpiece."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        # restore the placeholder tokens before writing the file back out
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=already_has_special_tokens
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
| 165 | 0 |
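The `WordpieceTokenizer` above is a greedy longest-match-first segmenter with no continuation prefix: at each position it takes the longest vocabulary entry it can, and emits the unknown token for a single character it cannot match. A toy illustration (the vocabulary entries are invented):

vocab = {"ab": 0, "abc": 1, "d": 2}  # hypothetical toy vocabulary
tok = WordpieceTokenizer(vocab=vocab, unk_token="<unk>")
print(tok.tokenize("abcd"))  # ['abc', 'd'] -- the longer 'abc' wins over 'ab'
print(tok.tokenize("abx"))   # ['ab', '<unk>'] -- the unmatched 'x' becomes <unk>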
"""simple docstring"""
from __future__ import annotations

import unittest

import numpy as np

from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask

if is_tf_available():
    import tensorflow as tf

    from transformers import TFBlipTextModel
    from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST

class BlipTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            # force each mask into right-padded form: attend up to a random
            # cut-off, mask everything after it
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict

@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    fx_compatible = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 203 |
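`prepare_config_and_inputs` above reshapes each random attention mask into a right-padded one: all positions before a random cut-off attend, everything after is masked. The same trick in isolation, with made-up shapes:

import numpy as np

batch_size, seq_length = 3, 7
input_mask = np.ones((batch_size, seq_length), dtype=np.int64)
rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
for batch_idx, start_index in enumerate(rnd_start_indices):
    input_mask[batch_idx, :start_index] = 1  # tokens up to the cut-off are kept
    input_mask[batch_idx, start_index:] = 0  # the tail is masked out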
"""simple docstring"""
import json
import os
import re
import sys
import urllib.request

import requests
from bs4 import BeautifulSoup

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
    " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}

def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    """Searches Google Images for the query and saves up to max_images to disk."""
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        "q": query,
        "tbm": "isch",
        "hl": "en",
        "ijn": "0",
    }

    html = requests.get("https://www.google.com/search", params=params, headers=headers)
    soup = BeautifulSoup(html.text, "html.parser")
    matched_images_data = "".join(
        re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script")))
    )

    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)

    matched_google_image_data = re.findall(
        r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",",
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0

    removed_matched_google_images_thumbnails = re.sub(
        r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]",
        "",
        str(matched_google_image_data),
    )

    matched_google_full_resolution_images = re.findall(
        r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]",
        removed_matched_google_images_thumbnails,
    )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode(
            "unicode-escape"
        )
        original_size_img = bytes(original_size_img_not_fixed, "ascii").decode(
            "unicode-escape"
        )
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"{path_name}/original_size_img_{index}.jpg"
        )
    return index

if __name__ == "__main__":
    try:
        image_count = download_images_from_google_query(sys.argv[1])
        print(f"{image_count} images were downloaded to disk.")
    except IndexError:
        print("Please provide a search term.")
        raise
| 203 | 1 |
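The double `unicode-escape` decode in the loop exists because the image URLs embedded in the page source are escaped twice. A small self-contained check with a made-up URL fragment:

raw = "https://example.com/img?a\\\\u003d1"  # doubly escaped: contains "\\u003d"
once = bytes(raw, "ascii").decode("unicode-escape")    # first pass: "\\" -> "\"
twice = bytes(once, "ascii").decode("unicode-escape")  # second pass: "\u003d" -> "="
assert twice.endswith("a=1")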
import pytest

from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs

@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected

@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected

@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
| 360 |
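As the parametrized cases above show, `_distribute_shards` splits `num_shards` items into at most `max_num_jobs` contiguous ranges, with earlier jobs taking one extra shard when the division is uneven:

from datasets.utils.sharding import _distribute_shards

print(_distribute_shards(num_shards=10, max_num_jobs=3))
# [range(0, 4), range(4, 7), range(7, 10)] -- 10 shards over 3 jobs: sizes 4, 3, 3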
from pathlib import Path

import cv2
import numpy as np
from matplotlib import pyplot as plt

def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Warp the image with the affine transform that maps pt1 onto pt2."""
    matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, matrix, (rows, cols))

if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    # NOTE: the original source/destination pairings were lost in this copy;
    # the pairs below are a reasonable reconstruction.
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
| 130 | 0 |
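`cv2.getAffineTransform` solves for the 2x3 matrix that maps three source points onto three destination points; applying that matrix to a source point in homogeneous form should reproduce its destination. A quick verification using the first point sets above:

import cv2
import numpy as np

src = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
dst = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
M = cv2.getAffineTransform(src, dst)          # shape (2, 3)
mapped = M @ np.array([50, 50, 1.0])          # first source point, homogeneous coords
assert np.allclose(mapped, dst[0])            # lands exactly on (10, 100)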