| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 86–54.5k) | int64 (0–371) | string (lengths 87–49.2k) | int64 (0–349) | int64 (0–1) |
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available

if is_vision_available():
    from PIL import Image

    from transformers import CLIPSegProcessor, ViTImageProcessor


@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images in channel-last format."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
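# A hypothetical quick-use sketch of the processor under test (the checkpoint id
# is assumed for illustration, not taken from this dataset row):
#
#   from transformers import CLIPSegProcessor
#   proc = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#   batch = proc(text=["a cat"], images=[img], return_tensors="pt")
#   sorted(batch.keys())  # ['attention_mask', 'input_ids', 'pixel_values']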
| code_codestyle: 319 |
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    """Return the 1-based line number of the base/exponent pair with the
    greatest numerical value, comparing x * log10(a) instead of a**x."""
    largest: float = 0
    result: int = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result


if __name__ == "__main__":
    print(solution())
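# A minimal sanity check of the trick used above (added for illustration, not
# part of the dataset row): log10(a**x) == x * log10(a), so base/exponent pairs
# can be ranked without computing the huge powers themselves.
from math import log10

assert 2 * log10(10) > 3 * log10(4)  # 10**2 = 100 beats 4**3 = 64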
| style_context_codestyle: 236 | label: 0 |
"""simple docstring"""
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class lowercase__ ( snake_case__ ):
def __init__( self : Tuple , snake_case__ : UNetaDModel , snake_case__ : UNetaDModel , snake_case__ : DDPMScheduler , snake_case__ : Any , ):
super().__init__()
lowerCamelCase_ : str =value_function
lowerCamelCase_ : Tuple =unet
lowerCamelCase_ : Dict =scheduler
lowerCamelCase_ : List[Any] =env
lowerCamelCase_ : Tuple =env.get_dataset()
lowerCamelCase_ : List[Any] ={}
for key in self.data.keys():
try:
lowerCamelCase_ : str =self.data[key].mean()
except: # noqa: E722
pass
lowerCamelCase_ : Dict ={}
for key in self.data.keys():
try:
lowerCamelCase_ : Optional[Any] =self.data[key].std()
except: # noqa: E722
pass
lowerCamelCase_ : Tuple =env.observation_space.shape[0]
lowerCamelCase_ : List[Any] =env.action_space.shape[0]
def UpperCAmelCase__ ( self : List[Any] , snake_case__ : List[str] , snake_case__ : Tuple ):
return (x_in - self.means[key]) / self.stds[key]
def UpperCAmelCase__ ( self : Tuple , snake_case__ : Optional[Any] , snake_case__ : str ):
return x_in * self.stds[key] + self.means[key]
def UpperCAmelCase__ ( self : Union[str, Any] , snake_case__ : List[str] ):
if type(snake_case__ ) is dict:
return {k: self.to_torch(snake_case__ ) for k, v in x_in.items()}
elif torch.is_tensor(snake_case__ ):
return x_in.to(self.unet.device )
return torch.tensor(snake_case__ , device=self.unet.device )
def UpperCAmelCase__ ( self : Optional[Any] , snake_case__ : Tuple , snake_case__ : Any , snake_case__ : str ):
for key, val in cond.items():
lowerCamelCase_ : Optional[int] =val.clone()
return x_in
def UpperCAmelCase__ ( self : int , snake_case__ : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : Optional[Any] ):
lowerCamelCase_ : str =x.shape[0]
lowerCamelCase_ : Optional[Any] =None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
lowerCamelCase_ : List[Any] =torch.full((batch_size,) , snake_case__ , device=self.unet.device , dtype=torch.long )
for _ in range(snake_case__ ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
lowerCamelCase_ : Optional[int] =self.value_function(x.permute(0 , 2 , 1 ) , snake_case__ ).sample
lowerCamelCase_ : List[Any] =torch.autograd.grad([y.sum()] , [x] )[0]
lowerCamelCase_ : List[Any] =self.scheduler._get_variance(snake_case__ )
lowerCamelCase_ : int =torch.exp(0.5 * posterior_variance )
lowerCamelCase_ : List[Any] =model_std * grad
lowerCamelCase_ : str =0
lowerCamelCase_ : Optional[int] =x.detach()
lowerCamelCase_ : Optional[int] =x + scale * grad
lowerCamelCase_ : Dict =self.reset_xa(snake_case__ , snake_case__ , self.action_dim )
lowerCamelCase_ : Tuple =self.unet(x.permute(0 , 2 , 1 ) , snake_case__ ).sample.permute(0 , 2 , 1 )
# TODO: verify deprecation of this kwarg
lowerCamelCase_ : Optional[int] =self.scheduler.step(snake_case__ , snake_case__ , snake_case__ , predict_epsilon=snake_case__ )["prev_sample"]
# apply conditions to the trajectory (set the initial state)
lowerCamelCase_ : Tuple =self.reset_xa(snake_case__ , snake_case__ , self.action_dim )
lowerCamelCase_ : Optional[int] =self.to_torch(snake_case__ )
return x, y
def __call__( self : Tuple , snake_case__ : Any , snake_case__ : Optional[int]=64 , snake_case__ : Any=32 , snake_case__ : List[Any]=2 , snake_case__ : Tuple=0.1 ):
# normalize the observations and create batch dimension
lowerCamelCase_ : Dict =self.normalize(snake_case__ , "observations" )
lowerCamelCase_ : int =obs[None].repeat(snake_case__ , axis=0 )
lowerCamelCase_ : int ={0: self.to_torch(snake_case__ )}
lowerCamelCase_ : Union[str, Any] =(batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
lowerCamelCase_ : Tuple =randn_tensor(snake_case__ , device=self.unet.device )
lowerCamelCase_ : int =self.reset_xa(snake_case__ , snake_case__ , self.action_dim )
lowerCamelCase_ : List[str] =self.to_torch(snake_case__ )
# run the diffusion process
lowerCamelCase_ , lowerCamelCase_ : List[str] =self.run_diffusion(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# sort output trajectories by value
lowerCamelCase_ : Optional[int] =y.argsort(0 , descending=snake_case__ ).squeeze()
lowerCamelCase_ : Union[str, Any] =x[sorted_idx]
lowerCamelCase_ : Dict =sorted_values[:, :, : self.action_dim]
lowerCamelCase_ : str =actions.detach().cpu().numpy()
lowerCamelCase_ : Optional[Any] =self.de_normalize(snake_case__ , key="actions" )
# select the action with the highest value
if y is not None:
lowerCamelCase_ : Union[str, Any] =0
else:
# if we didn't run value guiding, select a random action
lowerCamelCase_ : str =np.random.randint(0 , snake_case__ )
lowerCamelCase_ : List[str] =denorm_actions[selected_index, 0]
return denorm_actions
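# A usage sketch (checkpoint and environment names are assumed, not part of this
# dataset row): the value function ranks the denoised trajectories, and the
# first action of the highest-value trajectory is returned.
#
#   import gym
#   env = gym.make("hopper-medium-v2")  # requires d4rl
#   pipeline = ValueGuidedRLPipeline.from_pretrained(
#       "bglick13/hopper-medium-v2-value-function-hor32", env=env
#   )
#   action = pipeline(env.reset(), planning_horizon=32, n_guide_steps=2, scale=0.1)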
| code_codestyle: 209 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_lilt': ['LILT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LiltConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lilt"] = [
'LILT_PRETRAINED_MODEL_ARCHIVE_LIST',
'LiltForQuestionAnswering',
'LiltForSequenceClassification',
'LiltForTokenClassification',
'LiltModel',
'LiltPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| style_context_codestyle: 209 | label: 1 |
import requests

APPID = ""  # <-- Put your OpenWeatherMap appid here!
URL_BASE = "https://api.openweathermap.org/data/2.5/"


def current_weather(q: str = "Chicago", appid: str = APPID) -> dict:
    # `locals()` turns the parameter names into the API query string,
    # so the names `q` and `appid` matter.
    return requests.get(URL_BASE + "weather", params=locals()).json()


def weather_forecast(q: str = "Kolkata, India", appid: str = APPID) -> dict:
    return requests.get(URL_BASE + "forecast", params=locals()).json()


def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    return requests.get(URL_BASE + "onecall", params=locals()).json()


if __name__ == "__main__":
    from pprint import pprint

    while True:
        location = input("Enter a location:").strip()
        if location:
            pprint(current_weather(location))
        else:
            break
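# Hypothetical call (needs a valid OpenWeatherMap APPID; the fields shown are
# standard in the API response, but this example is not part of the dataset row):
#
#   data = current_weather("Amsterdam")
#   print(data["weather"][0]["description"], data["main"]["temp"])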
| code_codestyle: 188 |
from __future__ import annotations

from fractions import Fraction
from math import gcd, sqrt


def is_sq(number: int) -> bool:
    """Return True if `number` is a perfect square."""
    sq: int = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Add the three fractions and reduce the result to lowest terms."""
    top: int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom: int = x_den * y_den * z_den
    hcf: int = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator


if __name__ == "__main__":
    print(f"{solution() = }")
| style_context_codestyle: 27 | label: 0 |
"""simple docstring"""
from math import isqrt, loga
def lowercase ( lowerCAmelCase__ : int ) -> Optional[int]:
__a = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , lowerCAmelCase__ , lowerCAmelCase__ ):
__a = False
return [i for i in range(2 , lowerCAmelCase__ ) if is_prime[i]]
def lowercase ( lowerCAmelCase__ : int = 800800 , lowerCAmelCase__ : int = 800800 ) -> Optional[Any]:
__a = degree * loga(lowerCAmelCase__ )
__a = int(lowerCAmelCase__ )
__a = calculate_prime_numbers(lowerCAmelCase__ )
__a = 0
__a = 0
__a = len(lowerCAmelCase__ ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(F'''{solution() = }''')
| code_codestyle: 365 |
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    """Create a beta schedule that discretizes the given alpha_t_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    """A sampler inspired by DPM-Solver-2 and Algorithm 2 from the Karras et al. (2022) paper."""

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
def __UpperCAmelCase ( self , _a , _a=None ):
if schedule_timesteps is None:
__a = self.timesteps
__a = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
__a = 1 if len(_a ) > 1 else 0
else:
__a = timestep.cpu().item() if torch.is_tensor(_a ) else timestep
__a = self._index_counter[timestep_int]
return indices[pos].item()
@property
def __UpperCAmelCase ( self ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def __UpperCAmelCase ( self , _a , _a , ):
__a = self.index_for_timestep(_a )
if self.state_in_first_order:
__a = self.sigmas[step_index]
else:
__a = self.sigmas_interpol[step_index]
__a = sample / ((sigma**2 + 1) ** 0.5)
return sample
def __UpperCAmelCase ( self , _a , _a = None , _a = None , ):
__a = num_inference_steps
__a = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
__a = np.linspace(0 , num_train_timesteps - 1 , _a , dtype=_a )[::-1].copy()
elif self.config.timestep_spacing == "leading":
__a = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__a = (np.arange(0 , _a ) * step_ratio).round()[::-1].copy().astype(_a )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
__a = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__a = (np.arange(_a , 0 , -step_ratio )).round().copy().astype(_a )
timesteps -= 1
else:
raise ValueError(
f'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
__a = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
__a = torch.from_numpy(np.log(_a ) ).to(_a )
__a = np.interp(_a , np.arange(0 , len(_a ) ) , _a )
__a = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
__a = torch.from_numpy(_a ).to(device=_a )
# interpolate sigmas
__a = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
__a = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
__a = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(_a ).startswith('''mps''' ):
# mps does not support float64
__a = torch.from_numpy(_a ).to(_a , dtype=torch.floataa )
else:
__a = torch.from_numpy(_a ).to(_a )
# interpolate timesteps
__a = self.sigma_to_t(_a ).to(_a , dtype=timesteps.dtype )
__a = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
__a = torch.cat([timesteps[:1], interleaved_timesteps] )
__a = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
__a = defaultdict(_a )
def __UpperCAmelCase ( self , _a ):
# get log sigma
__a = sigma.log()
# get distribution
__a = log_sigma - self.log_sigmas[:, None]
# get sigmas range
__a = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
__a = low_idx + 1
__a = self.log_sigmas[low_idx]
__a = self.log_sigmas[high_idx]
# interpolate sigmas
__a = (low - log_sigma) / (low - high)
__a = w.clamp(0 , 1 )
# transform interpolation to time range
__a = (1 - w) * low_idx + w * high_idx
__a = t.view(sigma.shape )
return t
@property
def __UpperCAmelCase ( self ):
return self.sample is None
def __UpperCAmelCase ( self , _a , _a , _a , _a = True , ):
__a = self.index_for_timestep(_a )
# advance index counter by 1
__a = timestep.cpu().item() if torch.is_tensor(_a ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
__a = self.sigmas[step_index]
__a = self.sigmas_interpol[step_index + 1]
__a = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
__a = self.sigmas[step_index - 1]
__a = self.sigmas_interpol[step_index]
__a = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
__a = 0
__a = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
__a = sigma_hat if self.state_in_first_order else sigma_interpol
__a = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
__a = sigma_hat if self.state_in_first_order else sigma_interpol
__a = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError('''prediction_type not implemented yet: sample''' )
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
__a = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
__a = sigma_interpol - sigma_hat
# store for 2nd order step
__a = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
__a = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
__a = sigma_next - sigma_hat
__a = self.sample
__a = None
__a = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_a )
def __UpperCAmelCase ( self , _a , _a , _a , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
__a = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(_a ):
# mps does not support float64
__a = self.timesteps.to(original_samples.device , dtype=torch.floataa )
__a = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
__a = self.timesteps.to(original_samples.device )
__a = timesteps.to(original_samples.device )
__a = [self.index_for_timestep(_a , _a ) for t in timesteps]
__a = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
__a = sigma.unsqueeze(-1 )
__a = original_samples + noise * sigma
return noisy_samples
def __len__( self ):
return self.config.num_train_timesteps
| style_context_codestyle: 11 | label: 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window

        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2

        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, attention_window=self.attention_window, **self.config_updates)
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]],
            axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = tf.zeros_like(inputs_dict["attention_mask"] )
UpperCAmelCase_ = 2
UpperCAmelCase_ = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["global_attention_mask"] , )
UpperCAmelCase_ = True
UpperCAmelCase_ = self.model_tester.seq_length
UpperCAmelCase_ = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(__a : Union[str, Any] ):
UpperCAmelCase_ = outputs.decoder_attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(__a : List[str] ):
UpperCAmelCase_ = [t.numpy() for t in outputs.encoder_attentions]
UpperCAmelCase_ = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
UpperCAmelCase_ = True
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = model_class(__a )
UpperCAmelCase_ = model(self._prepare_for_class(__a , __a ) )
UpperCAmelCase_ = len(__a )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
if self.is_encoder_decoder:
UpperCAmelCase_ = model_class(__a )
UpperCAmelCase_ = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_decoder_attentions_output(__a )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
UpperCAmelCase_ = True
UpperCAmelCase_ = model_class(__a )
UpperCAmelCase_ = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
# Check attention is always last and order is fine
UpperCAmelCase_ = True
UpperCAmelCase_ = True
UpperCAmelCase_ = model_class(__a )
UpperCAmelCase_ = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__a ) )
self.assertEqual(model.config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
@unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
def _lowercase (self : Union[str, Any] ):
pass
def _lowercase (self : Optional[Any] ):
# TODO: Head-masking not yet implement
pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led

        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")

        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
| code_codestyle: 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
A_ : Dict = logging.get_logger(__name__)
A_ : Any = {
"microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
"microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
"microsoft/deberta-v2-xlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
),
"microsoft/deberta-v2-xxlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
),
}
class lowerCamelCase (A__ ):
lowerCamelCase__ : Tuple = 'deberta-v2'
def __init__( self : Any , __UpperCAmelCase : Optional[Any]=1_2_8_1_0_0 , __UpperCAmelCase : Optional[Any]=1_5_3_6 , __UpperCAmelCase : List[Any]=2_4 , __UpperCAmelCase : str=2_4 , __UpperCAmelCase : Optional[int]=6_1_4_4 , __UpperCAmelCase : Any="gelu" , __UpperCAmelCase : Union[str, Any]=0.1 , __UpperCAmelCase : Dict=0.1 , __UpperCAmelCase : Optional[Any]=5_1_2 , __UpperCAmelCase : List[str]=0 , __UpperCAmelCase : int=0.02 , __UpperCAmelCase : Any=1e-7 , __UpperCAmelCase : Tuple=False , __UpperCAmelCase : Any=-1 , __UpperCAmelCase : Union[str, Any]=0 , __UpperCAmelCase : Optional[int]=True , __UpperCAmelCase : Optional[Any]=None , __UpperCAmelCase : Optional[Any]=0 , __UpperCAmelCase : Union[str, Any]="gelu" , **__UpperCAmelCase : Any , ) -> Union[str, Any]:
super().__init__(**__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = type_vocab_size
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = relative_attention
SCREAMING_SNAKE_CASE__ = max_relative_positions
SCREAMING_SNAKE_CASE__ = pad_token_id
SCREAMING_SNAKE_CASE__ = position_biased_input
# Backwards compatibility
if type(__UpperCAmelCase ) == str:
SCREAMING_SNAKE_CASE__ = [x.strip() for x in pos_att_type.lower().split("""|""" )]
SCREAMING_SNAKE_CASE__ = pos_att_type
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = kwargs.get("""pooler_hidden_size""" , __UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = pooler_dropout
SCREAMING_SNAKE_CASE__ = pooler_hidden_act
class lowerCamelCase (A__ ):
@property
def SCREAMING_SNAKE_CASE ( self : Any ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE__ = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
SCREAMING_SNAKE_CASE__ = {0: """batch""", 1: """sequence"""}
if self._config.type_vocab_size > 0:
return OrderedDict(
[("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] )
else:
return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] )
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
return 1_2
def SCREAMING_SNAKE_CASE ( self : Tuple , __UpperCAmelCase : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , __UpperCAmelCase : int = -1 , __UpperCAmelCase : int = -1 , __UpperCAmelCase : int = -1 , __UpperCAmelCase : bool = False , __UpperCAmelCase : Optional["TensorType"] = None , __UpperCAmelCase : int = 3 , __UpperCAmelCase : int = 4_0 , __UpperCAmelCase : int = 4_0 , __UpperCAmelCase : "PreTrainedTokenizerBase" = None , ) -> Mapping[str, Any]:
SCREAMING_SNAKE_CASE__ = super().generate_dummy_inputs(preprocessor=__UpperCAmelCase , framework=__UpperCAmelCase )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
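# Hypothetical usage via the standard PretrainedConfig API (not code from this
# file): round-tripping a config through save/load.
#
#   config = DebertaV2Config(num_hidden_layers=12, hidden_size=768)
#   config.save_pretrained("./my-deberta-v2")
#   config = DebertaV2Config.from_pretrained("./my-deberta-v2")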
| style_context_codestyle: 165 | label: 0 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)
def _UpperCAmelCase ( self ) -> List[Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _UpperCAmelCase ( self ) -> int:
return
def _UpperCAmelCase ( self ) -> Tuple:
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = model_class(__UpperCAmelCase )
_a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a = [*signature.parameters.keys()]
_a = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Optional[int]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCAmelCase )
@unittest.skip(reason='''UperNet does not use inputs_embeds''' )
def _UpperCAmelCase ( self ) -> Tuple:
pass
@unittest.skip(reason='''UperNet does not support input and output embeddings''' )
def _UpperCAmelCase ( self ) -> Dict:
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def _UpperCAmelCase ( self ) -> Tuple:
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def _UpperCAmelCase ( self ) -> Optional[Any]:
pass
@require_torch_multi_gpu
@unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def _UpperCAmelCase ( self ) -> Optional[Any]:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _UpperCAmelCase ( self ) -> List[Any]:
pass
def _UpperCAmelCase ( self ) -> str:
def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
_a = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
with torch.no_grad():
_a = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
_a = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_a = self.model_tester.num_stages
self.assertEqual(len(__UpperCAmelCase ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Tuple:
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = _config_zero_init(__UpperCAmelCase )
_a = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
_a = model_class(config=__UpperCAmelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@unittest.skip(reason='''UperNet does not have tied weights''' )
def _UpperCAmelCase ( self ) -> Any:
pass
@slow
def _UpperCAmelCase ( self ) -> Union[str, Any]:
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a = UperNetForSemanticSegmentation.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def prepare_img():
    """Verifies results on an image from the ADE20k fixtures."""
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image
@require_torch
@require_vision
@slow
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase ( self ) -> List[Any]:
_a = AutoImageProcessor.from_pretrained('''openmmlab/upernet-swin-tiny''' )
_a = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-swin-tiny''' ).to(__UpperCAmelCase )
_a = prepare_img()
_a = processor(images=__UpperCAmelCase , return_tensors='''pt''' ).to(__UpperCAmelCase )
with torch.no_grad():
_a = model(**__UpperCAmelCase )
_a = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , __UpperCAmelCase )
_a = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , __UpperCAmelCase , atol=1e-4 ) )
def _UpperCAmelCase ( self ) -> Tuple:
_a = AutoImageProcessor.from_pretrained('''openmmlab/upernet-convnext-tiny''' )
_a = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-convnext-tiny''' ).to(__UpperCAmelCase )
_a = prepare_img()
_a = processor(images=__UpperCAmelCase , return_tensors='''pt''' ).to(__UpperCAmelCase )
with torch.no_grad():
_a = model(**__UpperCAmelCase )
_a = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , __UpperCAmelCase )
_a = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , __UpperCAmelCase , atol=1e-4 ) )
| code_codestyle: 153 |
"""simple docstring"""
def A_ ( _lowerCAmelCase : int = 10_00 ):
"""simple docstring"""
return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1 ) )
if __name__ == "__main__":
print(solution())
| style_context_codestyle: 153 | label: 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'],
'configuration_data2vec_text': [
'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecTextConfig',
'Data2VecTextOnnxConfig',
],
'configuration_data2vec_vision': [
'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecVisionConfig',
'Data2VecVisionOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_data2vec_audio"] = [
'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecAudioForAudioFrameClassification',
'Data2VecAudioForCTC',
'Data2VecAudioForSequenceClassification',
'Data2VecAudioForXVector',
'Data2VecAudioModel',
'Data2VecAudioPreTrainedModel',
]
    _import_structure["modeling_data2vec_text"] = [
'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecTextForCausalLM',
'Data2VecTextForMaskedLM',
'Data2VecTextForMultipleChoice',
'Data2VecTextForQuestionAnswering',
'Data2VecTextForSequenceClassification',
'Data2VecTextForTokenClassification',
'Data2VecTextModel',
'Data2VecTextPreTrainedModel',
]
    _import_structure["modeling_data2vec_vision"] = [
'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecVisionForImageClassification',
'Data2VecVisionForMaskedImageModeling',
'Data2VecVisionForSemanticSegmentation',
'Data2VecVisionModel',
'Data2VecVisionPreTrainedModel',
]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_data2vec_vision"] = [
'TFData2VecVisionForImageClassification',
'TFData2VecVisionForSemanticSegmentation',
'TFData2VecVisionModel',
'TFData2VecVisionPreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
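# Usage sketch (hypothetical caller): with the lazy module registered above, heavy
# submodules are imported only on first attribute access, e.g.
#
#   from transformers.models.data2vec import Data2VecTextConfig  # resolved lazily via _LazyModule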
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
'python',
'tqdm',
'regex',
'requests',
'packaging',
'filelock',
'numpy',
'tokenizers',
'huggingface-hub',
'safetensors',
'accelerate',
'pyyaml',
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
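# Example (sketch): modules elsewhere in the library can trigger a single pinned check, e.g.
#
#   dep_version_check("tokenizers")  # raises if the installed version violates the pin in `deps`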
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["text", "image", "audio"]


def create_inputs(input_types: List[str]):
    inputs = []

    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")

    return inputs


def output_types(outputs: List):
    output_types = []

    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")

    return output_types
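# Example (sketch, assumes torch and PIL are installed): the two helpers above round-trip, e.g.
#
#   output_types(create_inputs(["text", "audio"]))  # -> ["text", "audio"]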
@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_type_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []
        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
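# Usage sketch (hypothetical tool under test): concrete test cases mix this class in
# and provide `self.tool`, e.g.
#
#   class MyToolTester(ToolTesterMixin, unittest.TestCase):
#       def setUp(self):
#           self.tool = load_tool("translation")  # any Tool exposing .inputs / .outputs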
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__

    return wrapper
def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data
def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))

    return dataset
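# Usage sketch (hypothetical feature spec and output path):
#
#   features = datasets.Features({"text": datasets.Value("string"), "label": datasets.Value("int32")})
#   dataset = generate_example_dataset("/tmp/bench.arrow", features, num_examples=100)
#   assert len(dataset) == 100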
def upper(word: str) -> str:
    """
    Convert an entire ASCII string to uppercase.

    >>> upper("wow")
    'WOW'
    >>> upper("Hello World")
    'HELLO WORLD'
    """
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
logger = logging.getLogger(__name__)


class NERTransformer(BaseTransformer):
    """
    A training module for NER. See BaseTransformer for the core options.
    """

    mode = "token-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        module = import_module("tasks")
        try:
            token_classification_task_clazz = getattr(module, hparams.task_type)
            self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
                f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
            )
        self.labels = self.token_classification_task.get_labels(hparams.labels)
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams, len(self.labels), self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_num):
        "Compute loss and log."
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids

        outputs = self(**inputs)
        loss = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}
    def prepare_data(self):
        "Called to initialize data. Use the call to construct features"
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
                features = torch.load(cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = self.token_classification_task.read_examples_from_file(args.data_dir, mode)
                features = self.token_classification_task.convert_examples_to_features(
                    examples,
                    self.labels,
                    args.max_seq_length,
                    self.tokenizer,
                    cls_token_at_end=bool(self.config.model_type in ["xlnet"]),
                    cls_token=self.tokenizer.cls_token,
                    cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0,
                    sep_token=self.tokenizer.sep_token,
                    sep_token_extra=False,
                    pad_on_left=bool(self.config.model_type in ["xlnet"]),
                    pad_token=self.tokenizer.pad_token_id,
                    pad_token_segment_id=self.tokenizer.pad_token_type_id,
                    pad_token_label_id=self.pad_token_label_id,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: int, batch_size: int, shuffle: bool = False) -> DataLoader:
        "Load datasets. Called after prepare data."
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        if features[0].token_type_ids is not None:
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        else:
            all_token_type_ids = torch.tensor([0 for f in features], dtype=torch.long)
            # HACK(we will not use this anymore soon)
        all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label_ids), batch_size=batch_size
        )
    def validation_step(self, batch, batch_nb):
        """Compute validation."""
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs):
        "Evaluation called for both Val and Test"
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        preds = np.argmax(preds, axis=2)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        label_map = dict(enumerate(self.labels))
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        for i in range(out_label_ids.shape[0]):
            for j in range(out_label_ids.shape[1]):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        results = {
            "val_loss": val_loss_mean,
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list
    def validation_epoch_end(self, outputs):
        # when stable
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs):
        # updating to test_epoch_end instead of deprecated test_end
        ret, predictions, targets = self._eval_end(outputs)

        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        # Add NER specific options
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--task_type", default="NER", type=str, help="Task type to fine tune in training (e.g. NER, POS, etc)"
        )
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--labels",
            default="",
            type=str,
            help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = NERTransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    model = NERTransformer(args)
    trainer = generic_train(model, args)

    if args.do_predict:
        # See https://github.com/huggingface/transformers/issues/3159
        # pl use this default format to create a checkpoint:
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
        # /pytorch_lightning/callbacks/model_checkpoint.py#L322
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        trainer.test(model)
"""simple docstring"""
def lowercase (SCREAMING_SNAKE_CASE_ : int = 10 ) -> str:
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or n < 0:
raise ValueError('Invalid input' )
SCREAMING_SNAKE_CASE = 10**n
SCREAMING_SNAKE_CASE = 2_84_33 * (pow(2 , 7_83_04_57 , SCREAMING_SNAKE_CASE_ )) + 1
return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'''{solution(10) = }''')
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImg2ImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8
    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=64,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=1,
        )
        model = CLIPVisionModel(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2

        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy"
        )
        pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            input_image,
            generator=generator,
            guidance_scale=3.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_ls = temp_rs
    temp_lr = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_ls = temp_rs
        temp_lr = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
if __name__ == "__main__":
main()
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        # prune in place so os.walk skips scripts/ and hidden directories
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")
def md_prefix(i) -> str:
    return f"{i * '  '}*" if i else "\n##"
def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path
def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")
if __name__ == "__main__":
print_directory_md('.')
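# Usage sketch: run from a repository root to regenerate its index, e.g.
#
#   python scripts/build_directory_md.py > DIRECTORY.md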
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
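# Example invocation (sketch; the file paths are placeholders):
#
#   python convert_mluke_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path mluke.bin --metadata_path metadata.json \
#       --entity_vocab_path entity_vocab.jsonl --pytorch_dump_folder_path ./mluke-base --model_size base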
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)
    def test_pow_of_3_inference_steps(self):
        # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
from __future__ import annotations


def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    """Divide `number_of_bytes` into `partitions` contiguous byte ranges."""
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list


if __name__ == "__main__":
    import doctest

    doctest.testmod()
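# Example (sketch):
#
#   allocation_num(16647, 4)  # -> ['1-4161', '4162-8322', '8323-12483', '12484-16647']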
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "enhancement",
    "new pipeline/model",
    "new scheduler",
    "wip",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="closed")
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="open")
issue.remove_from_labels("stale")
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored.")
issue.add_to_labels("stale")
if __name__ == "__main__":
main()
from typing import Any


class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    # adding nodes
    def push(self, new_data: Any):
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    # swapping nodes
    def swap_nodes(self, node_data_1, node_data_2):
        if node_data_1 == node_data_2:
            return
        else:
            node_1 = self.head
            while node_1 is not None and node_1.data != node_data_1:
                node_1 = node_1.next

            node_2 = self.head
            while node_2 is not None and node_2.data != node_data_2:
                node_2 = node_2.next

            if node_1 is None or node_2 is None:
                return

            node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)

    ll.print_list()

    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 1_0563, 1_0786,
1_1420, 1_1709, 1_1907, 1_3163, 1_3697, 1_3700, 1_4808, 1_5306, 1_6410, 1_6791,
1_7992, 1_9203, 1_9510, 2_0724, 2_2305, 2_2935, 2_7007, 3_0109, 3_0420, 3_3409,
3_4949, 4_0283, 4_0493, 4_0549, 4_7282, 4_9146, 5_0257, 5_0359, 5_0360, 5_0361
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 1_0428, 1_0929, 1_1938, 1_2033, 1_2331, 1_2562, 1_3793,
1_4157, 1_4635, 1_5265, 1_5618, 1_6553, 1_6604, 1_8362, 1_8956, 2_0075, 2_1675,
2_2520, 2_6130, 2_6161, 2_6435, 2_8279, 2_9464, 3_1650, 3_2302, 3_2470, 3_6865,
4_2863, 4_7425, 4_9870, 5_0254, 5_0258, 5_0360, 5_0361, 5_0362
]
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=51865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50256,
        bos_token_id=50256,
        eos_token_id=50256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length

        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )

        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")

        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
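# Usage sketch:
#
#   config = WhisperConfig()  # defaults roughly match the openai/whisper-base checkpoint
#   onnx_config = WhisperOnnxConfig(config)
#   list(onnx_config.inputs)  # ['input_features', 'decoder_input_ids']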
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)
@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """
        Remap the deprecated `no_*` arguments to their positive counterparts and pop
        PyTorch-specific arguments before delegating to `BenchmarkArguments`.
        """
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
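
# A minimal usage sketch (my addition, not part of this file): the deprecated
# `no_*` flags are flipped into their positive counterparts by `__init__`.
#
#   args = PyTorchBenchmarkArguments(models=["bert-base-uncased"], no_cuda=True)
#   assert args.cuda is False  # remapped from the deprecated flag
#   print(args.device, args.n_gpu)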
| 211 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
"facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
"facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
"facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
"facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
"facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
"facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
"facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
"facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
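
# A minimal usage sketch (my addition, assumptions only): the ONNX config
# exposes dynamic axes for the tokenized inputs, keyed by the task.
#
#   onnx_config = XmodOnnxConfig(XmodConfig(), task="sequence-classification")
#   print(onnx_config.inputs)
#   # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#   #              ('attention_mask', {0: 'batch', 1: 'sequence'})])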
| 211 | 1 |
'''simple docstring'''
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class BenchmarkArguments:
    """
    Arguments used in the benchmark scripts, covering which models to run and which measurements to take.
    """

    models: List[str] = list_field(
        default=[],
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )
    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
    )
    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )
    inference: bool = field(
        default=True,
        metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
    )
    cuda: bool = field(
        default=True,
        metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
    )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True,
        metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
    )
    memory: bool = field(
        default=True,
        metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f"log_{round(time())}.csv",
        metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )

    def __post_init__(self):
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )

    def to_json_string(self):
        """
        Serializes this instance to a JSON string.
        """
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased'].`"
            )
        return self.models

    @property
    def do_multi_processing(self):
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
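
# A minimal usage sketch (my addition): `model_names` guards against an empty
# model list and `to_json_string` round-trips the whole configuration.
#
#   args = BenchmarkArguments(models=["bert-base-cased"], batch_sizes=[1], sequence_lengths=[8])
#   print(args.model_names)       # ['bert-base-cased']
#   print(args.to_json_string())  # JSON dump of all benchmark settings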
| 370 |
"""
Sum of digits sequence
Project Euler Problem 551: https://projecteuler.net/problem=551

The sequence a(0), a(1), ... is defined by a(0) = 1 and, for n >= 1,
a(n) = a(n-1) + (sum of the digits of a(n-1)). Find a(10^15).
"""
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}


def next_term(a_i, k, i, n):
    """
    Jump ahead in the sequence using cached differences, updating the digit
    array a_i in place. Returns the accumulated difference and the number of
    terms jumped.
    """
    # ds_b -> digitsum(b), c -> the low k digits, when a(i) = b * 10^k + c
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))

    return (diff, dn)


def compute(a_i, k, i, n):
    """
    Same as next_term but computes terms sequentially, without using cached jumps.
    """
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    """
    Adds addend to the digit array `digits`, starting at index k.
    """
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    """
    Returns the n-th term of the sequence.
    """
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
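

# A slow reference implementation (my addition, not part of the original
# solution; it assumes the same indexing convention as `solution` above,
# where the first term is 1). Useful for sanity-checking small inputs.
def _brute_force_solution(n: int) -> int:
    term = 1
    for _ in range(n - 1):
        term += sum(int(digit) for digit in str(term))
    return term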
if __name__ == "__main__":
print(F"""{solution() = }""")
| 91 | 0 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}


class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=3_00,
        max_position_embeddings=10_24,
        encoder_layers=6,
        encoder_ffn_dim=10_24,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=10_24,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=2_56,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=3_00,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """
        Serializes this instance to a Python dictionary, expanding the nested backbone config.
        """
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 42 |
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'feature request',
'wip',
]
def main() -> None:
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='''closed''' )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
| 117 | 0 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "yjernite/retribert-base-uncased": (
        "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
    ),
}
class RetriBertConfig(PretrainedConfig):
    model_type = "retribert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=7_68,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=30_72,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=1_28,
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
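
# A minimal usage sketch (my addition): the config stores the dual-encoder
# options such as `share_encoders` and the pooled `projection_dim`.
#
#   config = RetriBertConfig(projection_dim=256, share_encoders=False)
#   print(config.model_type, config.projection_dim)  # retribert 256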
| 351 |
"""simple docstring"""
import unittest
from knapsack import knapsack as k
class TestKnapsack(unittest.TestCase):
    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 1_00, 1_20]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 2_20)
if __name__ == "__main__":
unittest.main()
| 203 | 0 |
'''simple docstring'''
def is_automorphic_number(number: int) -> bool:
    """
    An automorphic number is one whose square ends in the same digits as the
    number itself, e.g. 5 -> 25, 76 -> 5776.
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        # compare the trailing digit of the number and of its square
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
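
# A few illustrative checks (my addition): 5^2 = 25 and 76^2 = 5776 both end
# in the original number, while 7^2 = 49 does not.
#
#   assert is_automorphic_number(5)
#   assert is_automorphic_number(76)
#   assert not is_automorphic_number(7)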
if __name__ == "__main__":
import doctest
doctest.testmod()
| 200 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
    from transformers import MobileNetV1ImageProcessor


class MobileNetV1ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }


@require_torch
@require_vision
class MobileNetV1ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileNetV1ImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetV1ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 200 | 1 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x: Tensor):
        """
        Transfer the weights of `self.src` to `self.dest` by performing a forward pass using `x` as input.
        Under the hood we track all the operations in both modules.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
class FakeRegNetVisslWrapper(nn.Module):
    """
    Fake wrapper for RegNet that mimics what vissl does without the need to pass a config file.
    """

    def __init__(self, model: nn.Module):
        super().__init__()

        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block"), f"Unexpected layer name {k}"
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f"res{block_index}", v))

        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        return get_trunk_forward_outputs(
            x,
            out_feat_keys=None,
            feature_blocks=self._feature_blocks,
        )
class NameToFromModelFuncMap(dict):
    """
    A dictionary with some additional logic to return a function that creates the correct original model.
    """

    def convert_name_to_timm(self, x: str) -> str:
        x_split = x.split("-")
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__(self, x: str) -> Callable[[], Tuple[nn.Module, Dict]]:
        # default to timm!
        if x not in self:
            x = self.convert_name_to_timm(x)
            val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)
        return val
class NameToOurModelFuncMap(dict):
    """
    A dictionary with some additional logic to return the correct Hugging Face RegNet class reference.
    """

    def __getitem__(self, x: str) -> Callable[[], nn.Module]:
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
def manually_copy_vissl_head(from_state_dict, to_state_dict, keys: List[Tuple[str, str]]):
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f"Copied key={from_key} to={to_key}")
    return to_state_dict
def convert_weight_and_push(
    name: str,
    from_model_func: Callable[[], nn.Module],
    our_model_func: Callable[[], nn.Module],
    config: RegNetConfig,
    save_directory: Path,
    push_to_hub: bool = True,
):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    if from_state_dict is not None:
        keys = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
        to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
        our_model.load_state_dict(to_state_dict)

    our_outputs = our_model(x, output_hidden_states=True)
    our_output = (
        our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
    )

    from_output = from_model(x)
    from_output = from_output[-1] if type(from_output) is list else from_output

    # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]

    assert torch.allclose(from_output, our_output), "The model logits don't match the original one."

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        size = 224 if "seer" not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size)
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1_000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
"regnet-x-002": ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type="x" ),
"regnet-x-004": ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type="x" ),
"regnet-x-006": ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type="x" ),
"regnet-x-008": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type="x" ),
"regnet-x-016": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type="x" ),
"regnet-x-032": ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1_008] , groups_width=48 , layer_type="x" ),
"regnet-x-040": ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1_360] , groups_width=40 , layer_type="x" ),
"regnet-x-064": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1_624] , groups_width=56 , layer_type="x" ),
"regnet-x-080": ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1_920] , groups_width=120 , layer_type="x" ),
"regnet-x-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2_240] , groups_width=112 , layer_type="x" ),
"regnet-x-160": ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2_048] , groups_width=128 , layer_type="x" ),
"regnet-x-320": ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1_344, 2_520] , groups_width=168 , layer_type="x" ),
# y variant
"regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ),
"regnet-y-004": ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ),
"regnet-y-006": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ),
"regnet-y-008": ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ),
"regnet-y-016": ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ),
"regnet-y-032": ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1_512] , groups_width=24 ),
"regnet-y-040": ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1_088] , groups_width=64 ),
"regnet-y-064": ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1_296] , groups_width=72 ),
"regnet-y-080": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2_016] , groups_width=56 ),
"regnet-y-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2_240] , groups_width=112 ),
"regnet-y-160": ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1_232, 3_024] , groups_width=112 ),
"regnet-y-320": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
"regnet-y-320-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232 ),
"regnet-y-640-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1_968, 4_920] , groups_width=328 ),
"regnet-y-1280-seer": RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1_056, 2_904, 7_392] , groups_width=264 ),
"regnet-y-2560-seer": RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1_696, 2_544, 5_088] , groups_width=640 ),
"regnet-y-10b-seer": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2_020, 4_040, 11_110, 28_280] , groups_width=1_010 ),
# finetuned on imagenet
"regnet-y-320-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232 ),
"regnet-y-640-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1_968, 4_920] , groups_width=328 ),
"regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1_056, 2_904, 7_392] , groups_width=264 ),
"regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1_696, 2_544, 5_088] , groups_width=640 ),
"regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2_020, 4_040, 11_110, 28_280] , groups_width=1_010 ),
}
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()
    # add seer weights logic

    def load_using_classy_vision(checkpoint_url: str, model_func: Callable[[], nn.Module]) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="cpu")
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["classy_state_dict"]["base_model"]["model"]
        state_dict = model_state_dict["trunk"]
        model.load_state_dict(state_dict)
        return model.eval(), model_state_dict["heads"]

    # pretrained
    names_to_from_model_map["regnet-y-320-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1_010, w_0=1_744, w_a=620.83, w_m=2.52))
        ),
    )

    # IN1K finetuned
    names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1_010, w_0=1_744, w_a=620.83, w_m=2.52))
        ),
    )

    if model_name:
        convert_weight_and_push(
            model_name,
            names_to_from_model_map[model_name],
            names_to_ours_model_map[model_name],
            names_to_config[model_name],
            save_directory,
            push_to_hub,
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name,
                names_to_from_model_map[model_name],
                names_to_ours_model_map[model_name],
                config,
                save_directory,
                push_to_hub,
            )
    return config, expected_shape
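

# A hypothetical CLI sketch (my addition, not part of this script): convert one
# architecture and write it under ./regnet-dump.
#
#   python convert_regnet_to_pytorch.py --model_name regnet-y-040 --pytorch_dump_folder_path ./regnet-dump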
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported regnet* architecture,"
" currently: regnetx-*, regnety-*. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
    args = parser.parse_args()

    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 358 |
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
from torch.cuda.amp import autocast
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to pretrain.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    verbose_logging: Optional[bool] = field(
        default=False,
        metadata={"help": "Whether to log verbose messages or not."},
    )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."}
    )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."}
    )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={"help": "Decay of gumbel temperature during training."}
    )
def configure_logger(model_args: ModelArguments, training_args: TrainingArguments):
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: str = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="file",
        metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"}
    )
@dataclass
class DataCollatorForWav2Vec2Pretraining:
    """
    Data collator that dynamically pads the received inputs and prepares the masked time indices
    needed for self-supervised pretraining.
    """

    model: Wav2Vec2ForPreTraining
    feature_extractor: Wav2Vec2FeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None

    def __call__(self, features):
        # reformat list to dict and set to pytorch format
        batch = self.feature_extractor.pad(
            features,
            max_length=self.max_length,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])

        batch_size = batch["input_values"].shape[0]

        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long
            )

            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device
            )

            # these two operations makes sure that all values
            # before the output lengths indices are attended to
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=batch["input_values"].device), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()

        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length),
            self.model.config.mask_time_prob,
            self.model.config.mask_time_length,
            attention_mask=attention_mask,
            min_masks=2,
        )

        return batch
class Wav2Vec2PreTrainer(Trainer):
    """
    Subclassed :class:`~transformers.Trainer` for Wav2Vec2-like pretraining. The trainer decays the
    gumbel softmax temperature during training.
    """

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model, inputs):
        """
        Perform a training step on a batch of inputs.
        """
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )

        return loss.detach()
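

# A small illustrative sketch (my addition, assumptions only): the gumbel
# temperature follows a geometric decay clipped at the minimum, i.e.
# temp(step) = max(max_temp * decay**step, min_temp).
#
#   for step in (0, 1_000, 100_000):
#       print(step, max(2.0 * 0.999995**step, 0.5))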
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]",
            cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split="validation",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}",
            cache_dir=model_args.cache_dir,
        )

    # only normalized-inputs-training is supported
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names
    )

    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets["train"].column_names,
    )

    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = Wav2Vec2Config.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        gradient_checkpointing=training_args.gradient_checkpointing,
    )

    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'"
        )

    model = Wav2Vec2ForPreTraining(config)

    data_collator = DataCollatorForWav2Vec2Pretraining(model=model, feature_extractor=feature_extractor)

    trainer = Wav2Vec2PreTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        train_dataset=vectorized_datasets["train"],
        eval_dataset=vectorized_datasets["validation"],
        tokenizer=feature_extractor,
        max_gumbel_temp=model_args.max_gumbel_temperature,
        min_gumbel_temp=model_args.min_gumbel_temperature,
        gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()
if __name__ == "__main__":
main()
| 300 | 0 |
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class SegformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            num_encoder_blocks=self.num_encoder_blocks,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)

    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
__lowercase : List[Any] = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
__lowercase : Any = (
{
"feature-extraction": SegformerModel,
"image-classification": SegformerForImageClassification,
"image-segmentation": SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)
    def test_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)
@unittest.skip('SegFormer does not use inputs_embeds' )
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip('SegFormer does not have get_input_embeddings method and get_output_embeddings methods' )
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
pass
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(A_ )
UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase = [*signature.parameters.keys()]
UpperCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , A_ )
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = True
for model_class in self.all_model_classes:
UpperCamelCase = True
UpperCamelCase = False
UpperCamelCase = True
UpperCamelCase = model_class(A_ )
model.to(A_ )
model.eval()
with torch.no_grad():
UpperCamelCase = model(**self._prepare_for_class(A_ , A_ ) )
UpperCamelCase = outputs.attentions
UpperCamelCase = sum(self.model_tester.depths )
self.assertEqual(len(A_ ) , A_ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCamelCase = True
UpperCamelCase = model_class(A_ )
model.to(A_ )
model.eval()
with torch.no_grad():
UpperCamelCase = model(**self._prepare_for_class(A_ , A_ ) )
UpperCamelCase = outputs.attentions
self.assertEqual(len(A_ ) , A_ )
# verify the first attentions (first block, first layer)
UpperCamelCase = (self.model_tester.image_size // 4) ** 2
UpperCamelCase = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
UpperCamelCase = (self.model_tester.image_size // 32) ** 2
UpperCamelCase = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
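        # Shape arithmetic behind the two checks above: SegFormer patch-embeds with an
        # overall stride of 4 in the first block and 32 in the last, so the query length
        # is (image_size / stride) ** 2. Efficient self-attention further downsamples
        # keys/values by sr_ratio, giving (image_size / (stride * sr_ratio)) ** 2 keys.
        # With image_size=64 and sr_ratios=[8, 4, 2, 1]: 256 queries / 4 keys in the
        # first block, 4 queries / 4 keys in the last.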
UpperCamelCase = len(A_ )
# Check attention is always last and order is fine
UpperCamelCase = True
UpperCamelCase = True
UpperCamelCase = model_class(A_ )
model.to(A_ )
model.eval()
with torch.no_grad():
UpperCamelCase = model(**self._prepare_for_class(A_ , A_ ) )
self.assertEqual(out_len + 1 , len(A_ ) )
UpperCamelCase = outputs.attentions
self.assertEqual(len(A_ ) , A_ )
# verify the first attentions (first block, first layer)
UpperCamelCase = (self.model_tester.image_size // 4) ** 2
UpperCamelCase = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
def check_hidden_states_output(A_ , A_ , A_ ):
UpperCamelCase = model_class(A_ )
model.to(A_ )
model.eval()
with torch.no_grad():
UpperCamelCase = model(**self._prepare_for_class(A_ , A_ ) )
UpperCamelCase = outputs.hidden_states
UpperCamelCase = self.model_tester.num_encoder_blocks
self.assertEqual(len(A_ ) , A_ )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = True
check_hidden_states_output(A_ , A_ , A_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase = True
check_hidden_states_output(A_ , A_ , A_ )
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
if not self.model_tester.is_training:
return
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = True
for model_class in self.all_model_classes:
if model_class in get_values(A_ ):
continue
UpperCamelCase = model_class(A_ )
model.to(A_ )
model.train()
UpperCamelCase = self._prepare_for_class(A_ , A_ , return_labels=A_ )
UpperCamelCase = model(**A_ ).loss
loss.backward()
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
pass
@slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))
    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
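# Minimal standalone sketch of the post-processing path tested above. Assumption: the
# default SegformerImageProcessor() settings stand in for the test's resize + normalize
# configuration; everything else follows the test directly.
import torch
from PIL import Image
from transformers import SegformerForSemanticSegmentation, SegformerImageProcessor

image_processor = SegformerImageProcessor()
model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = image_processor(images=image, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)  # logits: (1, num_labels, H/4, W/4)

# upsample the coarse logits to any target size and take the per-pixel argmax
segmentation = image_processor.post_process_semantic_segmentation(
    outputs, target_sizes=[image.size[::-1]]
)[0]
print(segmentation.shape)  # (orig_height, orig_width), one class id per pixel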
| 222 |
import numpy as np
import torch
import tqdm
from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class ValueGuidedRLPipeline(DiffusionPipeline):
    def __init__(
        self,
        value_function: UNet1DModel,
        unet: UNet1DModel,
        scheduler: DDPMScheduler,
        env,
    ):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]
    def normalize(self, x_in, key):
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        if type(x_in) is dict:
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_x0(self, x_in, cond, act_dim):
        # overwrite the conditioned timesteps (here: t=0) with the known state
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in
    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()

                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]

                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad

                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x, conditions, self.action_dim)

            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)

            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]

            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y
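    # Guidance recap for run_diffusion: each denoising step takes `n_guide_steps`
    # gradient steps on the value function, scaled by the scheduler's posterior std at
    # that timestep, before the U-Net predicts the next (less noisy) trajectory.
    # Zeroing the gradient for t < 2 keeps the nearly-clean final samples unperturbed.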
    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        # normalize the observations and create batch dimension
        obs = self.normalize(obs, "observations")
        obs = obs[None].repeat(batch_size, axis=0)

        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)

        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape, device=self.unet.device)
        x = self.reset_x0(x1, conditions, self.action_dim)
        x = self.to_torch(x)

        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)

        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key="actions")

        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)

        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
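# Minimal usage sketch for the pipeline above, following the diffusers RL example.
# Assumptions: the environment id and checkpoint name below, and that `gym` + `d4rl`
# are installed (the pipeline calls env.get_dataset() on a D4RL-style environment).
import d4rl  # noqa: F401  (registers the offline-RL environments)
import gym

env = gym.make("hopper-medium-v2")
pipeline = ValueGuidedRLPipeline.from_pretrained(
    "bglick13/hopper-medium-v2-value-function-hor32", env=env
)

obs = env.reset()
for _ in range(10):
    # plan a value-guided 32-step trajectory and execute its first action
    action = pipeline(obs, planning_horizon=32, n_guide_steps=2, scale=0.1)
    obs, reward, done, _ = env.step(action)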
| 222 | 1 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class LogitsProcessorTest(unittest.TestCase):
    def _get_uniform_logits(self, batch_size: int, length: int):
        scores = jnp.ones((batch_size, length)) / length
        return scores
    def test_temperature_dist_warper(self):
__a : int = None
__a : List[str] = 2_0
__a : Union[str, Any] = self._get_uniform_logits(batch_size=2 , length=snake_case_ )
# tweak scores to not be uniform anymore
__a : str = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
__a : Optional[Any] = scores.at[1, 1_0].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
__a : Any = jax.nn.softmax(snake_case_ , axis=-1 )
__a : str = FlaxTemperatureLogitsWarper(temperature=0.5 )
__a : Union[str, Any] = FlaxTemperatureLogitsWarper(temperature=1.3 )
__a : str = jax.nn.softmax(temp_dist_warper_sharper(snake_case_ , scores.copy() , cur_len=snake_case_ ) , axis=-1 )
__a : List[Any] = jax.nn.softmax(temp_dist_warper_smoother(snake_case_ , scores.copy() , cur_len=snake_case_ ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
    def test_top_k_dist_warper(self):
__a : Any = None
__a : int = 1_0
__a : Union[str, Any] = 2
# create ramp distribution
__a : int = np.broadcast_to(np.arange(snake_case_ )[None, :] , (batch_size, vocab_size) ).copy()
__a : Union[str, Any] = ramp_logits[1:, : vocab_size // 2] + vocab_size
__a : Dict = FlaxTopKLogitsWarper(3 )
__a : Optional[Any] = top_k_warp(snake_case_ , snake_case_ , cur_len=snake_case_ )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
__a : str = 5
__a : int = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
__a : str = np.broadcast_to(np.arange(snake_case_ )[None, :] , (batch_size, length) ).copy()
__a : Tuple = top_k_warp_safety_check(snake_case_ , snake_case_ , cur_len=snake_case_ )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
    def test_top_p_dist_warper(self):
__a : Dict = None
__a : str = 1_0
__a : Tuple = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
__a : Optional[Any] = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
__a : Union[str, Any] = FlaxTopPLogitsWarper(0.8 )
__a : Tuple = np.exp(top_p_warp(snake_case_ , snake_case_ , cur_len=snake_case_ ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
__a : Optional[int] = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(snake_case_ , snake_case_ , atol=1E-3 ) )
# check edge cases with negative and extreme logits
__a : List[Any] = np.broadcast_to(np.arange(snake_case_ )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
__a : Optional[int] = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
__a : Any = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
__a : Any = top_p_warp(snake_case_ , snake_case_ , cur_len=snake_case_ )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
    def test_min_length_dist_processor(self):
__a : int = 2_0
__a : Union[str, Any] = 4
__a : str = 0
__a : Optional[Any] = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=snake_case_ )
# check that min length is applied at length 5
__a : Union[str, Any] = ids_tensor((batch_size, 2_0) , vocab_size=2_0 )
__a : List[str] = 5
__a : Any = self._get_uniform_logits(snake_case_ , snake_case_ )
__a : Optional[int] = min_dist_processor(snake_case_ , snake_case_ , cur_len=snake_case_ )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
__a : Optional[Any] = self._get_uniform_logits(snake_case_ , snake_case_ )
__a : Any = 1_5
__a : Any = min_dist_processor(snake_case_ , snake_case_ , cur_len=snake_case_ )
self.assertFalse(jnp.isinf(snake_case_ ).any() )
    def test_forced_bos_token_logits_processor(self):
__a : Dict = 2_0
__a : Union[str, Any] = 4
__a : Optional[int] = 0
__a : int = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=snake_case_ )
# check that all scores are -inf except the bos_token_id score
__a : str = ids_tensor((batch_size, 1) , vocab_size=2_0 )
__a : str = 1
__a : Tuple = self._get_uniform_logits(snake_case_ , snake_case_ )
__a : Optional[int] = logits_processor(snake_case_ , snake_case_ , cur_len=snake_case_ )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
__a : Optional[Any] = 3
__a : Optional[Any] = self._get_uniform_logits(snake_case_ , snake_case_ )
__a : Union[str, Any] = logits_processor(snake_case_ , snake_case_ , cur_len=snake_case_ )
self.assertFalse(jnp.isinf(snake_case_ ).any() )
    def test_forced_eos_token_logits_processor(self):
__a : Union[str, Any] = 2_0
__a : str = 4
__a : Optional[Any] = 0
__a : int = 5
__a : Union[str, Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=snake_case_ , eos_token_id=snake_case_ )
# check that all scores are -inf except the eos_token_id when max_length is reached
__a : str = ids_tensor((batch_size, 4) , vocab_size=2_0 )
__a : int = 4
__a : Dict = self._get_uniform_logits(snake_case_ , snake_case_ )
__a : List[Any] = logits_processor(snake_case_ , snake_case_ , cur_len=snake_case_ )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
__a : List[str] = 3
__a : Union[str, Any] = self._get_uniform_logits(snake_case_ , snake_case_ )
__a : Union[str, Any] = logits_processor(snake_case_ , snake_case_ , cur_len=snake_case_ )
self.assertFalse(jnp.isinf(snake_case_ ).any() )
    def test_processor_list(self):
__a : Dict = 4
__a : Optional[int] = 1_0
__a : int = 1_5
__a : Optional[Any] = 2
__a : Union[str, Any] = 1
__a : Dict = 1_5
# dummy input_ids and scores
__a : Tuple = ids_tensor((batch_size, sequence_length) , snake_case_ )
__a : Dict = input_ids.copy()
__a : List[Any] = self._get_uniform_logits(snake_case_ , snake_case_ )
__a : Optional[Any] = scores.copy()
# instantiate all dist processors
__a : Dict = FlaxTemperatureLogitsWarper(temperature=0.5 )
__a : Tuple = FlaxTopKLogitsWarper(3 )
__a : str = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
__a : Optional[int] = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=snake_case_ )
__a : Any = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=snake_case_ )
__a : Optional[Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=snake_case_ , eos_token_id=snake_case_ )
__a : List[str] = 1_0
# no processor list
__a : Optional[Any] = temp_dist_warp(snake_case_ , snake_case_ , cur_len=snake_case_ )
__a : Dict = top_k_warp(snake_case_ , snake_case_ , cur_len=snake_case_ )
__a : Optional[int] = top_p_warp(snake_case_ , snake_case_ , cur_len=snake_case_ )
__a : Union[str, Any] = min_dist_proc(snake_case_ , snake_case_ , cur_len=snake_case_ )
__a : Union[str, Any] = bos_dist_proc(snake_case_ , snake_case_ , cur_len=snake_case_ )
__a : Union[str, Any] = eos_dist_proc(snake_case_ , snake_case_ , cur_len=snake_case_ )
# with processor list
__a : Dict = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
__a : Dict = processor(snake_case_ , snake_case_ , cur_len=snake_case_ )
# scores should be equal
self.assertTrue(jnp.allclose(snake_case_ , snake_case_ , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
    def test_processor_list_jitted(self):
__a : List[Any] = 4
__a : Optional[Any] = 1_0
__a : Optional[Any] = 1_5
__a : int = 2
__a : Tuple = 1
__a : int = 1_5
# dummy input_ids and scores
__a : Union[str, Any] = ids_tensor((batch_size, sequence_length) , snake_case_ )
__a : List[str] = input_ids.copy()
__a : int = self._get_uniform_logits(snake_case_ , snake_case_ )
__a : Dict = scores.copy()
# instantiate all dist processors
__a : List[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
__a : str = FlaxTopKLogitsWarper(3 )
__a : Any = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
__a : Optional[int] = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=snake_case_ )
__a : Optional[int] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=snake_case_ )
__a : Dict = FlaxForcedEOSTokenLogitsProcessor(max_length=snake_case_ , eos_token_id=snake_case_ )
__a : int = 1_0
# no processor list
def run_no_processor_list(snake_case_ : List[str] , snake_case_ : List[Any] , snake_case_ : Any ):
__a : Any = temp_dist_warp(snake_case_ , snake_case_ , cur_len=snake_case_ )
__a : Optional[Any] = top_k_warp(snake_case_ , snake_case_ , cur_len=snake_case_ )
__a : Optional[int] = top_p_warp(snake_case_ , snake_case_ , cur_len=snake_case_ )
__a : Optional[int] = min_dist_proc(snake_case_ , snake_case_ , cur_len=snake_case_ )
__a : List[str] = bos_dist_proc(snake_case_ , snake_case_ , cur_len=snake_case_ )
__a : int = eos_dist_proc(snake_case_ , snake_case_ , cur_len=snake_case_ )
return scores
# with processor list
def run_processor_list(snake_case_ : List[Any] , snake_case_ : Union[str, Any] , snake_case_ : Tuple ):
__a : List[Any] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
__a : Dict = processor(snake_case_ , snake_case_ , cur_len=snake_case_ )
return scores
__a : Dict = jax.jit(snake_case_ )
__a : Union[str, Any] = jax.jit(snake_case_ )
__a : List[str] = jitted_run_no_processor_list(snake_case_ , snake_case_ , snake_case_ )
__a : str = jitted_run_processor_list(snake_case_ , snake_case_ , snake_case_ )
# scores should be equal
self.assertTrue(jnp.allclose(snake_case_ , snake_case_ , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
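# Standalone sketch of the composition pattern the last two tests verify: a
# FlaxLogitsProcessorList applies each warper in order and is jit-able. The processor
# choices and shapes below are illustrative.
import jax
import jax.numpy as jnp

from transformers.generation import (
    FlaxLogitsProcessorList,
    FlaxTemperatureLogitsWarper,
    FlaxTopKLogitsWarper,
)

processors = FlaxLogitsProcessorList(
    [FlaxTemperatureLogitsWarper(temperature=0.7), FlaxTopKLogitsWarper(top_k=5)]
)


@jax.jit
def warp(input_ids, scores, cur_len):
    return processors(input_ids, scores, cur_len=cur_len)


input_ids = jnp.zeros((2, 4), dtype="i4")
scores = jnp.log(jax.nn.softmax(jnp.arange(20.0).reshape(2, 10), axis=-1))
warped = warp(input_ids, scores, 4)  # all but the top-5 logits per row become -inf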
| 90 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_efficientnet': [
'EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EfficientNetConfig',
'EfficientNetOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientnet"] = [
'EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'EfficientNetForImageClassification',
'EfficientNetModel',
'EfficientNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
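# What the _LazyModule indirection above buys, as a sketch: importing light names from
# the package does not pull in torch; heavy submodules load on first attribute access.
from transformers import EfficientNetConfig  # cheap: configuration module only

config = EfficientNetConfig()

from transformers import EfficientNetForImageClassification  # torch loads here

model = EfficientNetForImageClassification(config)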
| 90 | 1 |
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UniPCMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "solver_type": "bh2",
        }

        config.update(**kwargs)
        return config
def UpperCAmelCase ( self , __UpperCAmelCase=0 , **__UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = dict(self.forward_default_kwargs )
__UpperCamelCase = kwargs.pop('num_inference_steps' , __UpperCAmelCase )
__UpperCamelCase = self.dummy_sample
__UpperCamelCase = 0.1 * sample
__UpperCamelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
__UpperCamelCase = self.get_scheduler_config(**__UpperCAmelCase )
__UpperCamelCase = scheduler_class(**__UpperCAmelCase )
scheduler.set_timesteps(__UpperCAmelCase )
# copy over dummy past residuals
__UpperCamelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__UpperCAmelCase )
__UpperCamelCase = scheduler_class.from_pretrained(__UpperCAmelCase )
new_scheduler.set_timesteps(__UpperCAmelCase )
# copy over dummy past residuals
__UpperCamelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
__UpperCamelCase , __UpperCamelCase = sample, sample
for t in range(__UpperCAmelCase , time_step + scheduler.config.solver_order + 1 ):
__UpperCamelCase = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample
__UpperCamelCase = new_scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCAmelCase ( self , __UpperCAmelCase=0 , **__UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = dict(self.forward_default_kwargs )
__UpperCamelCase = kwargs.pop('num_inference_steps' , __UpperCAmelCase )
__UpperCamelCase = self.dummy_sample
__UpperCamelCase = 0.1 * sample
__UpperCamelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
__UpperCamelCase = self.get_scheduler_config()
__UpperCamelCase = scheduler_class(**__UpperCAmelCase )
scheduler.set_timesteps(__UpperCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
__UpperCamelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__UpperCAmelCase )
__UpperCamelCase = scheduler_class.from_pretrained(__UpperCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(__UpperCAmelCase )
# copy over dummy past residual (must be after setting timesteps)
__UpperCamelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
__UpperCamelCase = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample
__UpperCamelCase = new_scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        # note: the original duplicated the construction block below outside the `if`,
        # which silently overrode any scheduler passed in; only the guarded copy is kept
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = dict(self.forward_default_kwargs )
__UpperCamelCase = kwargs.pop('num_inference_steps' , __UpperCAmelCase )
for scheduler_class in self.scheduler_classes:
__UpperCamelCase = self.get_scheduler_config()
__UpperCamelCase = scheduler_class(**__UpperCAmelCase )
__UpperCamelCase = self.dummy_sample
__UpperCamelCase = 0.1 * sample
if num_inference_steps is not None and hasattr(__UpperCAmelCase , 'set_timesteps' ):
scheduler.set_timesteps(__UpperCAmelCase )
elif num_inference_steps is not None and not hasattr(__UpperCAmelCase , 'set_timesteps' ):
__UpperCamelCase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
__UpperCamelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
__UpperCamelCase = dummy_past_residuals[: scheduler.config.solver_order]
__UpperCamelCase = scheduler.timesteps[5]
__UpperCamelCase = scheduler.timesteps[6]
__UpperCamelCase = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample
__UpperCamelCase = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = UniPCMultistepScheduler(**self.get_scheduler_config() )
__UpperCamelCase = self.full_loop(scheduler=__UpperCAmelCase )
__UpperCamelCase = torch.mean(torch.abs(__UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
__UpperCamelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config )
__UpperCamelCase = DEISMultistepScheduler.from_config(scheduler.config )
__UpperCamelCase = DPMSolverMultistepScheduler.from_config(scheduler.config )
__UpperCamelCase = UniPCMultistepScheduler.from_config(scheduler.config )
__UpperCamelCase = self.full_loop(scheduler=__UpperCAmelCase )
__UpperCamelCase = torch.mean(torch.abs(__UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
def UpperCAmelCase ( self ):
'''simple docstring'''
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
self.check_over_configs(thresholding=__UpperCAmelCase )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=__UpperCAmelCase , prediction_type=__UpperCAmelCase , sample_max_value=__UpperCAmelCase , solver_order=__UpperCAmelCase , solver_type=__UpperCAmelCase , )
def UpperCAmelCase ( self ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=__UpperCAmelCase , solver_type=__UpperCAmelCase , prediction_type=__UpperCAmelCase , )
__UpperCamelCase = self.full_loop(
solver_order=__UpperCAmelCase , solver_type=__UpperCAmelCase , prediction_type=__UpperCAmelCase , )
assert not torch.isnan(__UpperCAmelCase ).any(), "Samples have nan numbers"
def UpperCAmelCase ( self ):
'''simple docstring'''
self.check_over_configs(lower_order_final=__UpperCAmelCase )
self.check_over_configs(lower_order_final=__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=__UpperCAmelCase , time_step=0 )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.full_loop()
__UpperCamelCase = torch.mean(torch.abs(__UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.full_loop(prediction_type='v_prediction' )
__UpperCamelCase = torch.mean(torch.abs(__UpperCAmelCase ) )
assert abs(result_mean.item() - 0.1_0_1_4 ) < 1E-3
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.scheduler_classes[0]
__UpperCamelCase = self.get_scheduler_config(thresholding=__UpperCAmelCase , dynamic_thresholding_ratio=0 )
__UpperCamelCase = scheduler_class(**__UpperCAmelCase )
__UpperCamelCase = 10
__UpperCamelCase = self.dummy_model()
__UpperCamelCase = self.dummy_sample_deter.half()
scheduler.set_timesteps(__UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
__UpperCamelCase = model(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ).prev_sample
        assert sample.dtype == torch.float16
def UpperCAmelCase ( self , **__UpperCAmelCase ):
'''simple docstring'''
for scheduler_class in self.scheduler_classes:
__UpperCamelCase = self.get_scheduler_config(**__UpperCAmelCase )
__UpperCamelCase = scheduler_class(**__UpperCAmelCase )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
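# Standalone sketch of the from_config round-trip exercised in the scheduler-switch
# test above: the multistep solvers share a config schema and can be rebuilt from one
# another without retraining.
from diffusers import (
    DEISMultistepScheduler,
    DPMSolverMultistepScheduler,
    UniPCMultistepScheduler,
)

unipc = UniPCMultistepScheduler(num_train_timesteps=1000, solver_order=2, solver_type="bh2")
dpm = DPMSolverMultistepScheduler.from_config(unipc.config)
deis = DEISMultistepScheduler.from_config(dpm.config)
unipc_again = UniPCMultistepScheduler.from_config(deis.config)

unipc_again.set_timesteps(10)
print(unipc_again.timesteps)  # the 10 inference timesteps, in descending order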
| 316 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s, old, new, occurrence):
    li = s.rsplit(old, occurrence)
    return new.join(li)
def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())
def upgrade_state_dict(state_dict):
    upgrade = {}

    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")

        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")

        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)

        upgrade[key] = value.float()

    return upgrade
@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()

    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)

    # parameter sums must match between the original encoder and the converted model
    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
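# Hedged invocation sketch of the converter above. The encoder weights URL is the one
# published with the original DALL-E release; treat its availability as an assumption.
#
#   python convert_dalle_to_flava_codebook.py \
#       --checkpoint_path https://cdn.openai.com/dall-e/encoder.pkl \
#       --pytorch_dump_folder_path flava-image-codebook
#
# or, directly in Python, returning the upgraded state dict instead of saving:
hf_state_dict = convert_dalle_checkpoint(
    checkpoint_path="https://cdn.openai.com/dall-e/encoder.pkl",
    pytorch_dump_folder_path="flava-image-codebook",
    config_path=None,
    save_checkpoint=False,
)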
| 316 | 1 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
"CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
"CpmAntForCausalLM",
"CpmAntModel",
"CpmAntPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 366 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class ClapFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "is_longer"]
    def __init__(
        self,
        feature_size=64,
        sampling_rate=48_000,
        hop_length=480,
        max_length_s=10,
        fft_window_size=1024,
        padding_value=0.0,
        return_attention_mask=False,
        frequency_min: float = 0,
        frequency_max: float = 14_000,
        top_db: int = None,
        truncation: str = "fusion",
        padding: str = "repeatpad",
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, return_attention_mask=return_attention_mask, **kwargs
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm=None, mel_scale="htk"
        )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney"
        )
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
        log_mel_spectrogram = spectrogram(
            waveform, window_function(self.fft_window_size, "hann"), frame_length=self.fft_window_size, hop_length=self.hop_length, power=2.0, mel_filters=mel_filters, log_mel="dB"
        )
        return log_mel_spectrogram.T
    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])

        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]

        mel_shrink = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel_shrink, size=[chunk_frames, 64], mode="bilinear", align_corners=False
        )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion
    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding) -> np.array:
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than
                    # max_length+hop_length. In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)

            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]

        return input_mel, longer
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: str = None,
        padding: Optional[str] = None,
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
F' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
F' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float32):
            raw_speech = raw_speech.astype(np.float64)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]
        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], List):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features
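# Short usage sketch for the extractor above; the random waveform stands in for real
# 48 kHz mono audio (e.g. loaded with soundfile or librosa).
import numpy as np
from transformers import ClapFeatureExtractor

extractor = ClapFeatureExtractor()
audio = np.random.randn(48_000 * 12).astype(np.float32)  # 12 s > max_length_s of 10 s

features = extractor(audio, sampling_rate=48_000, return_tensors="pt")
print(features["input_features"].shape)  # (1, 4, n_frames, 64): four stacked mel views
print(features["is_longer"])  # truthy, since the clip exceeds the 10 s window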
| 134 | 0 |
import math
def jump_search(arr: list, x: int) -> int:
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1

    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1
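
# Worked example: with arr = [0, 1, 2, 8, 13, 17, 19, 32, 42] and x = 13, n = 9 gives a
# block size of floor(sqrt(9)) = 3: probe arr[2] = 2 (< 13), jump; arr[5] = 17 (>= 13),
# stop; then scan linearly from index 3 and find 13 at index 4 (O(sqrt(n)) probes).
assert jump_search([0, 1, 2, 8, 13, 17, 19, 32, 42], 13) == 4
assert jump_search([0, 1, 2, 8, 13, 17, 19, 32, 42], 7) == -1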
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    x = int(input("Enter the number to be searched:\n"))
    res = jump_search(arr, x)
if res == -1:
print('''Number not found!''')
else:
print(f'Number {x} is at index {res}')
| 244 |
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe1_model_id = "CompVis/stable-diffusion-v1-1"
pipe2_model_id = "CompVis/stable-diffusion-v1-2"
pipe3_model_id = "CompVis/stable-diffusion-v1-3"
pipe4_model_id = "CompVis/stable-diffusion-v1-4"
class StableDiffusionComparisonPipeline(DiffusionPipeline):
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = True , ):
        super().__init__()
UpperCamelCase__ = StableDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = StableDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = StableDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = StableDiffusionPipeline(
vae=SCREAMING_SNAKE_CASE_ , text_encoder=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , requires_safety_checker=SCREAMING_SNAKE_CASE_ , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def UpperCAmelCase_ (self ):
return {k: getattr(self , SCREAMING_SNAKE_CASE_ ) for k in self.config.keys() if not k.startswith("""_""" )}
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCamelCase__ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
self.enable_attention_slicing(SCREAMING_SNAKE_CASE_ )
@torch.no_grad()
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 5_12 , SCREAMING_SNAKE_CASE_ = 5_12 , SCREAMING_SNAKE_CASE_ = 50 , SCREAMING_SNAKE_CASE_ = 7.5 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "pil" , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , **SCREAMING_SNAKE_CASE_ , ):
return self.pipea(
prompt=SCREAMING_SNAKE_CASE_ , height=SCREAMING_SNAKE_CASE_ , width=SCREAMING_SNAKE_CASE_ , num_inference_steps=SCREAMING_SNAKE_CASE_ , guidance_scale=SCREAMING_SNAKE_CASE_ , negative_prompt=SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , latents=SCREAMING_SNAKE_CASE_ , output_type=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , callback=SCREAMING_SNAKE_CASE_ , callback_steps=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
@torch.no_grad()
    def text2img_sd1_2(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        # Generate with the Stable Diffusion v1.2 checkpoint.
        return self.pipe2(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs,
        )

    @torch.no_grad()
    def text2img_sd1_3(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        # Generate with the Stable Diffusion v1.3 checkpoint.
        return self.pipe3(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs,
        )

    @torch.no_grad()
    def text2img_sd1_4(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        # Generate with the Stable Diffusion v1.4 checkpoint.
        return self.pipe4(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs,
        )

    @torch.no_grad()
    def __call__(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)

        # Check that height and width are divisible by 8.
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")

        # Get the first result from Stable Diffusion checkpoint v1.1
        res1 = self.text2img_sd1_1(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs,
        )

        # Get the first result from Stable Diffusion checkpoint v1.2
        res2 = self.text2img_sd1_2(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs,
        )

        # Get the first result from Stable Diffusion checkpoint v1.3
        res3 = self.text2img_sd1_3(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs,
        )

        # Get the first result from Stable Diffusion checkpoint v1.4
        res4 = self.text2img_sd1_4(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs,
        )

        # Collect the first image from each checkpoint into a single output.
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
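
    # Illustrative usage sketch (added for clarity; the pipeline variable name is an
    # assumption, not part of the original file): one call fans the prompt out to the
    # four Stable Diffusion v1.x checkpoints and returns one image per checkpoint.
    #     output = pipe("an astronaut riding a horse", num_inference_steps=25)
    #     for i, image in enumerate(output.images):
    #         image.save(f"sd1_{i + 1}.png")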
| 244 | 1 |
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}
def load_vocab(vocab_file):
    """Load a vocabulary file into an ordered token -> index dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        """Greedy longest-match-first split of a single token against the vocab."""
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            # Shrink the window from the right until a vocab entry is found.
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end
        return sub_tokens
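
# Illustrative example of the greedy longest-match-first behaviour above, with a
# hypothetical two-entry vocab (this comment is an addition, not original code):
#     WordpieceTokenizer(vocab={"今天": 0, "天气": 1}, unk_token="<unk>").tokenize("今天天气")
#     -> ["今天", "天气"]   # unmatched spans fall back to "<unk>" one character at a time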
class CpmAntTokenizer(PreTrainedTokenizer):
    """CPM-Ant tokenizer: jieba pre-segmentation followed by WordPiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(self, vocab_file, bod_token="<d>", eod_token="</d>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", unk_token="<unk>", line_token="</n>", space_token="</_>", padding_side="left", **kwargs):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token, eod_token=eod_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, unk_token=unk_token, line_token=line_token, space_token=space_token, padding_side=padding_side, **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        # Expose the dedicated space/newline tokens as plain " " and "\n".
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)
    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def _tokenize(self, text):
        """Tokenize a string: segment with jieba, then refine each piece with WordPiece."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        # Restore the dedicated space/newline tokens before writing the file.
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
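
# Minimal usage sketch (an illustrative addition, not part of the original file);
# it assumes jieba is installed and the "openbmb/cpm-ant-10b" vocab can be fetched:
#     tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
#     ids = tokenizer.encode("今天天气真好!")
#     print(tokenizer.decode(ids))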
| 212 |
"""simple docstring"""
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
_A = """base_with_context"""
def load_notes_encoder(weights, model):
    model.token_embedder.weight = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"]))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )

        attention_weights = ly_weight["attention"]
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))
    return model
def load_continuous_encoder(weights, model):
    model.input_proj.weight = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        attention_weights = ly_weight["attention"]

        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )

        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))
    return model
def load_decoder(weights, model):
    model.conditioning_emb[0].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T))
    model.conditioning_emb[2].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    model.continuous_inputs_projection.weight = nn.Parameter(
        torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T)
    )

    for lyr_num, lyr in enumerate(model.decoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"])
        )
        lyr.layer[0].FiLMLayer.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T)
        )

        attention_weights = ly_weight["self_attention"]
        lyr.layer[0].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        attention_weights = ly_weight["MultiHeadDotProductAttention_0"]
        lyr.layer[1].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[1].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[1].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[1].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"])
        )

        lyr.layer[2].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[2].film.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T)
        )
        lyr.layer[2].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.decoder_norm.weight = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"]))
    model.spec_out.weight = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T))
    return model
def main(args):
    t5_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    t5_checkpoint = jax.tree_util.tree_map(onp.array, t5_checkpoint)

    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]

    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)

    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")

    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu",
    )
    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length["targets_context"], d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu",
    )
    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length["targets_context"], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate,
    )

    notes_encoder = load_notes_encoder(t5_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(t5_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(t5_checkpoint["target"]["decoder"], decoder)

    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan,
    )
    if args.save:
        pipe.save_pretrained(args.output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--output_path""", default=None, type=str, required=True, help="""Path to the converted model.""")
parser.add_argument(
"""--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
)
parser.add_argument(
"""--checkpoint_path""",
default=f"""{MODEL}/checkpoint_500000""",
type=str,
required=False,
help="""Path to the original jax model checkpoint.""",
)
    args = parser.parse_args()
main(args)
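
# Example invocation (illustrative; the script name and local checkpoint path are assumptions):
#     python convert_music_spectrogram_to_diffusers.py \
#         --checkpoint_path ./music_spectrogram_diffusion/base_with_context/checkpoint_500000 \
#         --output_path ./spectrogram_diffusion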
| 212 | 1 |
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}
def sanity_checks(args):
    """Perform a few sanity checks on the arguments before anything starts."""
    assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    parser = argparse.ArgumentParser(description="Training")
parser.add_argument("--force" , action="store_true" , help="Overwrite dump_path if it already exists." )
    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file", type=str, required=True, help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence."
    )
    parser.add_argument(
        "--student_type", type=str, choices=["distilbert", "roberta", "gpt2"], required=True, help="The student type (DistilBERT, RoBERTa)."
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )
    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")
    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm", default=0.0, type=float, help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag."
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )
    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop", default=0.15, type=float, help="Proportion of tokens for which we need to make a prediction."
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing", default=0.7, type=float, help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec)."
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")
    parser.add_argument(
        "--restrict_ce_to_mask", action="store_true", help="If true, compute the distillation loss only on the [MLM] prediction distribution."
    )
    parser.add_argument(
        "--freeze_pos_embs", action="store_true", help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only."
    )
    parser.add_argument(
        "--freeze_token_type_embds", action="store_true", help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only."
    )
    parser.add_argument("--n_epoch", type=int, default=3, help="Number of passes on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size", action="store_false", help="If true, group sequences that have similar length into the same batch. Default is true."
    )
    parser.add_argument(
        "--gradient_accumulation_steps", type=int, default=50, help="Gradient accumulation for larger training batches."
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")
    parser.add_argument(
        "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit"
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")
    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)
# ARGS #
    init_gpu_params(args)
    set_seed(args)
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether to"
                    " overwrite it. Use `--force` if you want to overwrite it."
                )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F'Experiment will be dumped and logged in {args.dump_path}' )
# SAVE PARAMS #
logger.info(F'Param: {args}' )
with open(os.path.join(args.dump_path , "parameters.json" ) , "w" ) as f:
            json.dump(vars(args), f, indent=4)
git_log(args.dump_path )
    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
logger.info("Data loader created." )
# STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)
if args.n_gpu > 0:
student.to(F'cuda:{args.local_rank}' )
logger.info("Student loaded." )
# TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
if args.n_gpu > 0:
teacher.to(F'cuda:{args.local_rank}' )
logger.info(F'Teacher loaded from {args.teacher_name}.' )
# FREEZING #
if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
distiller.train()
logger.info("Let's go get some drinks." )
if __name__ == "__main__":
main()
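
# Example invocation (illustrative; all paths are assumptions):
#     python train.py --force --dump_path serialization_dir/my_first_distillation \
#         --data_file data/binarized_text.bert-base-uncased.pickle \
#         --token_counts data/token_counts.bert-base-uncased.pickle \
#         --student_type distilbert --student_config training_configs/distilbert-base-uncased.json \
#         --teacher_type bert --teacher_name bert-base-uncased \
#         --mlm --alpha_ce 0.33 --alpha_mlm 0.33 --alpha_cos 0.33 --alpha_clm 0.0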
| 203 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, bos_token_id=0, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)
    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    fx_compatible = False
    test_pruning = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
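
# Illustrative way to run just this test module (the file path is an assumption):
#     pytest tests/models/blip/test_modeling_tf_blip_text.py -k "test_model"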
| 203 | 1 |
"""Even Tree: given a tree with an even number of nodes rooted at node 1, find the
maximum number of edges that can be removed so that every remaining connected
component has an even number of nodes."""
from collections import defaultdict


def dfs(start: int) -> int:
    """Return the size of the subtree rooted at `start`; record even-sized subtrees."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    # An even-sized subtree can be cut off from its parent.
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree():
    dfs(1)


if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    # The root's own (even-sized) subtree is not a removable edge, hence the -1.
    print(len(cuts) - 1)
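
# Worked example for the hard-coded tree above: the subtrees rooted at 3 (size 2)
# and 6 (size 4) are even, so cutting edges (1, 3) and (1, 6) leaves components of
# sizes 2, 4 and 4 (all even), and the script prints 2.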
| 164 |
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}

ZERO2 = "zero2"
ZERO3 = "zero3"
stages = [ZERO2, ZERO3]
def custom_name_func(func, param_num, param):
    # customize the test name generator so that both params appear in the sub-test name
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"
# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=False)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=False)

    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=True)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=True)

    def do_checks(self, output_dir):
        # XXX: run_asr is premature and doesn't save any results
        # so all we check for now is that the process didn't fail
        pass

    def run_and_check(self, stage, model, eval_steps=10, distributed=True, fp16=True, quality_checks=True):
        model_name = models[model]

        output_dir = self.run_trainer(
            stage=stage, model_name=model_name, eval_steps=eval_steps, num_train_epochs=1, distributed=distributed, fp16=fp16,
        )

        self.do_checks(output_dir)

        return output_dir

    def run_trainer(self, stage, model_name, eval_steps=10, num_train_epochs=1, distributed=True, fp16=True):
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)
        args = f"""
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --report_to none
            --save_steps 0
            --eval_steps {eval_steps}
            --report_to none
        """.split()

        if fp16:
            args.extend(["--fp16"])

        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())

        return output_dir

    def get_launcher(self, distributed=False):
        # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
        # - it won't be able to handle that
        # 2. for now testing with just 2 gpus max (since some quality tests may give different
        # results with more gpus because we use very little data)
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
| 164 | 1 |
"""Convert TrOCR checkpoints from the unilm repository."""
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'encoder.deit.blocks.{i}.norm1.weight', F'encoder.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'encoder.deit.blocks.{i}.norm1.bias', F'encoder.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.attn.proj.weight', F'encoder.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.attn.proj.bias', F'encoder.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.norm2.weight', F'encoder.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'encoder.deit.blocks.{i}.norm2.bias', F'encoder.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.mlp.fc1.weight', F'encoder.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.mlp.fc1.bias', F'encoder.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.mlp.fc2.weight', F'encoder.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'encoder.deit.blocks.{i}.mlp.fc2.bias', F'encoder.encoder.layer.{i}.output.dense.bias') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""),
("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""),
("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""),
("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""),
("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""),
("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight")

        state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """Copy/paste/tweak the original weights into our VisionEncoderDecoderModel structure."""
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")

    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False

    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"]
    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("decoder") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val

    # load state dict
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
    processor = TrOCRProcessor(image_processor, tokenizer)

    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 50265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311]
        )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170]
        )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210]
        )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535]
        )

    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving processor to {pytorch_dump_folder_path}")
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
    args = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
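
# Example invocation (illustrative; the script name is an assumption, the URL is the script's default):
#     python convert_trocr_unilm_to_pytorch.py \
#         --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#         --pytorch_dump_folder_path ./trocr-base-handwritten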
| 58 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")
space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")
hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")
nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 49 | 0 |
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m", "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "-c", "--caption", type=str, default="robotic cat with wings", help="Text used to generate images.",
    )
    parser.add_argument(
        "-n", "--images_num", type=int, default=4, help="How many images to generate.",
    )
    parser.add_argument(
        "-s", "--seed", type=int, default=42, help="Seed for random process.",
    )
    parser.add_argument(
        "-ci", "--cuda_id", type=int, default=0, help="cuda_id.",
    )
    args = parser.parse_args()
    return args
def image_grid(imgs, rows, cols):
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")
    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    grid_w, grid_h = grid.size
    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase="robotic cat with wings" , _UpperCAmelCase=7.5 , _UpperCAmelCase=50 , _UpperCAmelCase=1 , _UpperCAmelCase=42 , ):
__a = torch.Generator(pipeline.device ).manual_seed(_UpperCAmelCase )
__a = pipeline(
_UpperCAmelCase , guidance_scale=_UpperCAmelCase , num_inference_steps=_UpperCAmelCase , generator=_UpperCAmelCase , num_images_per_prompt=_UpperCAmelCase , ).images
__a = int(math.sqrt(_UpperCAmelCase ) )
__a = image_grid(_UpperCAmelCase , rows=_rows , cols=num_images_per_prompt // _rows )
return grid, images
args = parse_args()

# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
| 131 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_luke''': ['''LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LukeConfig'''],
'''tokenization_luke''': ['''LukeTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_luke"] = [
'''LUKE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LukeForEntityClassification''',
'''LukeForEntityPairClassification''',
'''LukeForEntitySpanClassification''',
'''LukeForMultipleChoice''',
'''LukeForQuestionAnswering''',
'''LukeForSequenceClassification''',
'''LukeForTokenClassification''',
'''LukeForMaskedLM''',
'''LukeModel''',
'''LukePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 131 | 1 |
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/xprophetnet-large-wiki100-cased": (
        "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
    ),
}
class XLMProphetNetConfig(PretrainedConfig):
    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }
    def __init__( self, activation_dropout = 0.1, activation_function = "gelu", vocab_size = 3_05_22, hidden_size = 10_24, encoder_ffn_dim = 40_96, num_encoder_layers = 12, num_encoder_attention_heads = 16, decoder_ffn_dim = 40_96, num_decoder_layers = 12, num_decoder_attention_heads = 16, attention_dropout = 0.1, dropout = 0.1, max_position_embeddings = 5_12, init_std = 0.0_2, is_encoder_decoder = True, add_cross_attention = True, decoder_start_token_id = 0, ngram = 2, num_buckets = 32, relative_max_distance = 1_28, disable_ngram_loss = False, eps = 0.0, use_cache = True, pad_token_id = 0, bos_token_id = 1, eos_token_id = 2, **kwargs, ) -> None:
        """simple docstring"""
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function
        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps
        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, add_cross_attention=add_cross_attention, decoder_start_token_id=decoder_start_token_id, **kwargs, )
    @property
    def num_hidden_layers(self) -> int:
        """simple docstring"""
        return self.num_encoder_layers + self.num_decoder_layers
    @num_hidden_layers.setter
    def num_hidden_layers(self, value) -> None:
        """simple docstring"""
        raise NotImplementedError(
            'This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'
            ' `num_decoder_layers`.')
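# Minimal usage sketch: the defaults above mirror microsoft/xprophetnet-large-wiki100-cased.
# config = XLMProphetNetConfig()
# config.num_hidden_layers  # == num_encoder_layers + num_decoder_layers == 24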
| 21 |
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester(ConfigTester):
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, 'width_multiplier'))
class MobileViTVaModelTester:
def __init__( self, lowerCamelCase, lowerCamelCase=13, lowerCamelCase=64, lowerCamelCase=2, lowerCamelCase=3, lowerCamelCase="swish", lowerCamelCase=3, lowerCamelCase=32, lowerCamelCase=0.1, lowerCamelCase=0.0_2, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=10, lowerCamelCase=None, lowerCamelCase=0.2_5, lowerCamelCase=0.0, lowerCamelCase=0.0, ) -> Any:
"""simple docstring"""
_lowercase : Any = parent
_lowercase : Optional[int] = batch_size
_lowercase : Dict = image_size
_lowercase : str = patch_size
_lowercase : Optional[int] = num_channels
_lowercase : Optional[Any] = make_divisible(5_12 * width_multiplier, divisor=8)
_lowercase : str = hidden_act
_lowercase : Dict = conv_kernel_size
_lowercase : int = output_stride
_lowercase : Optional[Any] = classifier_dropout_prob
_lowercase : Tuple = use_labels
_lowercase : int = is_training
_lowercase : Optional[Any] = num_labels
_lowercase : Dict = initializer_range
_lowercase : List[str] = scope
_lowercase : Tuple = width_multiplier
_lowercase : List[str] = ffn_dropout
_lowercase : Dict = attn_dropout
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_lowercase : Dict = None
_lowercase : Optional[int] = None
if self.use_labels:
_lowercase : Optional[Any] = ids_tensor([self.batch_size], self.num_labels)
_lowercase : str = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
_lowercase : Union[str, Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
return MobileViTVaConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, width_multiplier=self.width_multiplier, ffn_dropout=self.ffn_dropout_prob, attn_dropout=self.attn_dropout_prob, )
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Any:
"""simple docstring"""
_lowercase : Optional[int] = MobileViTVaModel(config=lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Optional[int] = model(lowerCamelCase)
self.parent.assertEqual(
result.last_hidden_state.shape, (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Optional[int]:
"""simple docstring"""
_lowercase : int = self.num_labels
_lowercase : Optional[int] = MobileViTVaForImageClassification(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Optional[Any] = model(lowerCamelCase, labels=lowerCamelCase)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> int:
"""simple docstring"""
_lowercase : Any = self.num_labels
_lowercase : Union[str, Any] = MobileViTVaForSemanticSegmentation(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Optional[int] = model(lowerCamelCase)
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
_lowercase : List[Any] = model(lowerCamelCase, labels=lowerCamelCase)
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels , pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class MobileViTVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    all_model_classes = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": MobileViTVaModel,
"""image-classification""": MobileViTVaForImageClassification,
"""image-segmentation""": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase_ : List[Any] = False
lowercase_ : Optional[int] = False
lowercase_ : List[Any] = False
lowercase_ : Tuple = False
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
        self.model_tester = MobileViTVaModelTester(self)
        self.config_tester = MobileViTVaConfigTester(self, config_class=MobileViTVaConfig, has_text_modality=False)
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViTV2 does not use inputs_embeds')
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='MobileViTV2 does not support input and output embeddings')
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='MobileViTV2 does not output attentions')
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(reason='Got `CUDA error: misaligned address` for tests after this one being run.')
def UpperCamelCase ( self) -> int:
"""simple docstring"""
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
pass
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase , _lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase : List[Any] = model_class(lowerCamelCase)
_lowercase : Tuple = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowercase : Any = [*signature.parameters.keys()]
_lowercase : Union[str, Any] = ['pixel_values']
self.assertListEqual(arg_names[:1], lowerCamelCase)
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase)
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
def check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase):
_lowercase : Optional[Any] = model_class(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
with torch.no_grad():
_lowercase : Optional[int] = model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase))
_lowercase : List[Any] = outputs.hidden_states
_lowercase : Tuple = 5
self.assertEqual(len(lowerCamelCase), lowerCamelCase)
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
_lowercase : Optional[int] = 2
for i in range(len(lowerCamelCase)):
self.assertListEqual(
list(hidden_states[i].shape[-2:]), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], )
divisor *= 2
self.assertEqual(self.model_tester.output_stride, divisor // 2)
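            # e.g. image_size=64 yields feature maps of 32, 16, 8, 4 and 2 pixels, so the final divisor of 64 implies output_stride 32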
_lowercase , _lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase : Tuple = True
check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowercase : Optional[Any] = True
check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase)
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase)
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase)
@slow
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : str = MobileViTVaModel.from_pretrained(lowerCamelCase)
self.assertIsNotNone(lowerCamelCase)
def UpperCamelCase_( ) -> Dict:
_lowercase : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class _lowerCamelCase( unittest.TestCase ):
@cached_property
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
return (
MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256')
if is_vision_available()
else None
)
@slow
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : List[str] = MobileViTVaForImageClassification.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256').to(
lowerCamelCase)
_lowercase : Dict = self.default_image_processor
_lowercase : Union[str, Any] = prepare_img()
_lowercase : Dict = image_processor(images=lowerCamelCase, return_tensors='pt').to(lowerCamelCase)
# forward pass
with torch.no_grad():
_lowercase : Tuple = model(**lowerCamelCase)
# verify the logits
_lowercase : Optional[int] = torch.Size((1, 10_00))
self.assertEqual(outputs.logits.shape, lowerCamelCase)
_lowercase : Union[str, Any] = torch.tensor([-1.63_36E00, -7.32_04E-02, -5.18_83E-01]).to(lowerCamelCase)
self.assertTrue(torch.allclose(outputs.logits[0, :3], lowerCamelCase, atol=1E-4))
@slow
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
_lowercase : Optional[int] = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')
_lowercase : Optional[int] = model.to(lowerCamelCase)
_lowercase : Optional[int] = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')
_lowercase : Union[str, Any] = prepare_img()
_lowercase : Tuple = image_processor(images=lowerCamelCase, return_tensors='pt').to(lowerCamelCase)
# forward pass
with torch.no_grad():
_lowercase : List[Any] = model(**lowerCamelCase)
_lowercase : str = outputs.logits
# verify the logits
_lowercase : Tuple = torch.Size((1, 21, 32, 32))
self.assertEqual(logits.shape, lowerCamelCase)
_lowercase : Union[str, Any] = torch.tensor(
[
[[7.0_8_6_3, 7.1_5_2_5, 6.8_2_0_1], [6.6_9_3_1, 6.8_7_7_0, 6.8_9_3_3], [6.2_9_7_8, 7.0_3_6_6, 6.9_6_3_6]],
[[-3.7_1_3_4, -3.6_7_1_2, -3.6_6_7_5], [-3.5_8_2_5, -3.3_5_4_9, -3.4_7_7_7], [-3.3_4_3_5, -3.3_9_7_9, -3.2_8_5_7]],
[[-2.9_3_2_9, -2.8_0_0_3, -2.7_3_6_9], [-3.0_5_6_4, -2.4_7_8_0, -2.0_2_0_7], [-2.6_8_8_9, -1.9_2_9_8, -1.7_6_4_0]],
], device=lowerCamelCase, )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3], lowerCamelCase, atol=1E-4))
@slow
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : List[str] = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')
_lowercase : Tuple = model.to(lowerCamelCase)
_lowercase : str = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')
_lowercase : int = prepare_img()
_lowercase : Dict = image_processor(images=lowerCamelCase, return_tensors='pt').to(lowerCamelCase)
# forward pass
with torch.no_grad():
_lowercase : Union[str, Any] = model(**lowerCamelCase)
_lowercase : Any = outputs.logits.detach().cpu()
_lowercase : Optional[int] = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase, target_sizes=[(50, 60)])
_lowercase : Any = torch.Size((50, 60))
self.assertEqual(segmentation[0].shape, lowerCamelCase)
_lowercase : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase)
_lowercase : Optional[int] = torch.Size((32, 32))
self.assertEqual(segmentation[0].shape, lowerCamelCase)
| 21 | 1 |
'''simple docstring'''
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)
class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None
    @staticmethod
    def is_available():
        """simple docstring"""
        raise NotImplementedError
    def run( self , trainer , n_trials , direction , **kwargs ):
        """simple docstring"""
        raise NotImplementedError
    def default_hp_space( self , trial ):
        """simple docstring"""
        raise NotImplementedError
    def ensure_available( self ):
        """simple docstring"""
        if not self.is_available():
            raise RuntimeError(
                f"""You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.""" )
    @classmethod
    def pip_install( cls ):
        """simple docstring"""
        return f"""`pip install {cls.pip_package or cls.name}`"""
class OptunaBackend(HyperParamSearchBackendBase ):
    name = "optuna"
    @staticmethod
    def is_available():
        """simple docstring"""
        return is_optuna_available()
    def run( self , trainer , n_trials , direction , **kwargs ):
        """simple docstring"""
        return run_hp_search_optuna(trainer , n_trials , direction , **kwargs )
    def default_hp_space( self , trial ):
        """simple docstring"""
        return default_hp_space_optuna(trial )
class RayTuneBackend(HyperParamSearchBackendBase ):
    name = "ray"
    pip_package = "'ray[tune]'"
    @staticmethod
    def is_available():
        """simple docstring"""
        return is_ray_available()
    def run( self , trainer , n_trials , direction , **kwargs ):
        """simple docstring"""
        return run_hp_search_ray(trainer , n_trials , direction , **kwargs )
    def default_hp_space( self , trial ):
        """simple docstring"""
        return default_hp_space_ray(trial )
class SigOptBackend(HyperParamSearchBackendBase ):
    name = "sigopt"
    @staticmethod
    def is_available():
        """simple docstring"""
        return is_sigopt_available()
    def run( self , trainer , n_trials , direction , **kwargs ):
        """simple docstring"""
        return run_hp_search_sigopt(trainer , n_trials , direction , **kwargs )
    def default_hp_space( self , trial ):
        """simple docstring"""
        return default_hp_space_sigopt(trial )
class WandbBackend(HyperParamSearchBackendBase ):
    name = "wandb"
    @staticmethod
    def is_available():
        """simple docstring"""
        return is_wandb_available()
    def run( self , trainer , n_trials , direction , **kwargs ):
        """simple docstring"""
        return run_hp_search_wandb(trainer , n_trials , direction , **kwargs )
    def default_hp_space( self , trial ):
        """simple docstring"""
        return default_hp_space_wandb(trial )
ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends ) > 0:
        name = available_backends[0].name
        if len(available_backends ) > 1:
            logger.info(
                f"""{len(available_backends )} hyperparameter search backends available. Using {name} as the default.""" )
return name
raise RuntimeError(
'''No hyperparameter search backend available.\n'''
+ '''\n'''.join(
f""" - To install {backend.name} run {backend.pip_install()}"""
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
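# Usage sketch: resolve the default backend name at runtime.
# name = default_hp_search_backend()  # e.g. "optuna" when only optuna is installed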
| 237 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
__UpperCAmelCase ={"vocab_file": "vocab.json", "merges_file": "merges.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
},
"merges_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"allenai/longformer-base-4096": 4_0_9_6,
"allenai/longformer-large-4096": 4_0_9_6,
"allenai/longformer-large-4096-finetuned-triviaqa": 4_0_9_6,
"allenai/longformer-base-4096-extra.pos.embd.only": 4_0_9_6,
"allenai/longformer-large-4096-extra.pos.embd.only": 4_0_9_6,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
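# Example: the space byte (0x20) is not printable, so it is remapped past 0xFF,
# e.g. bytes_to_unicode()[ord(" ")] == "Ġ"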
def get_pairs(word ):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
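# Example: get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}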
class LongformerTokenizer(PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , merges_file , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , **kwargs , ):
        """simple docstring"""
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
        with open(vocab_file , encoding='''utf-8''' ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding='''utf-8''' ) as merges_handle:
            bpe_merges = merges_handle.read().split('''\n''' )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
    @property
    def vocab_size( self ):
        """simple docstring"""
return len(self.encoder )
    def get_vocab( self ):
        """simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ):
        """simple docstring"""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = ''' '''.join(word )
        self.cache[token] = word
        return word
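    # Example with illustrative ranks {("l", "o"): 0, ("lo", "w"): 1}: bpe("low") merges fully to "low";
    # tokens with no ranked pairs are returned space-separated.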
    def _tokenize( self , text ):
        """simple docstring"""
        bpe_tokens = []
        for token in re.findall(self.pat , text ):
            token = ''''''.join(
                self.byte_encoder[b] for b in token.encode('''utf-8''' ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(''' ''' ) )
        return bpe_tokens
    def _convert_token_to_id( self , token ):
        """simple docstring"""
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        """simple docstring"""
        return self.decoder.get(index )
    def convert_tokens_to_string( self , tokens ):
        """simple docstring"""
        text = ''''''.join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
        return text
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        with open(vocab_file , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '''\n''' )
        index = 0
        with open(merge_file , '''w''' , encoding='''utf-8''' ) as writer:
            writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        ''' Please check that the tokenizer is not corrupted!''' )
                    index = token_index
                writer.write(''' '''.join(bpe_tokens ) + '''\n''' )
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens( self , token_ids_a: List[int] , token_ids_b: Optional[List[int]] = None ):
        """simple docstring"""
        if token_ids_b is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_b + sep
    def get_special_tokens_mask( self , token_ids_a: List[int] , token_ids_b: Optional[List[int]] = None , already_has_special_tokens: bool = False ):
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=True )
        if token_ids_b is None:
            return [1] + ([0] * len(token_ids_a )) + [1]
        return [1] + ([0] * len(token_ids_a )) + [1, 1] + ([0] * len(token_ids_b )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_a: List[int] , token_ids_b: Optional[List[int]] = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
    def prepare_for_tokenization( self , text , is_split_into_words=False , **kwargs ):
        """simple docstring"""
        add_prefix_space = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = ''' ''' + text
        return (text, kwargs)
| 237 | 1 |
def greatest_common_divisor( a , b ):
    """simple docstring"""
    return abs(b ) if a == 0 else greatest_common_divisor(b % a , a )
def gcd_by_iterative( x , y ):
    """simple docstring"""
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x , y = y, x % y
    return abs(x )
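# Quick check with example values: both implementations agree, e.g.
# greatest_common_divisor(54, 24) == gcd_by_iterative(54, 24) == 6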
def main():
    """simple docstring"""
    try:
        nums = input('''Enter two integers separated by comma (,): ''' ).split(''',''' )
        num_a = int(nums[0] )
        num_b = int(nums[1] )
        print(
            F"greatest_common_divisor({num_a}, {num_b}) = "
            F"{greatest_common_divisor(num_a , num_b )}" )
        print(F"By iterative gcd({num_a}, {num_b}) = {gcd_by_iterative(num_a , num_b )}" )
except (IndexError, UnboundLocalError, ValueError):
print('''Wrong input''' )
if __name__ == "__main__":
main()
| 210 |
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key( name ) -> Optional[Any]:
    if "cls_token" in name:
        name = name.replace("""cls_token""" , """vit.embeddings.cls_token""" )
    if "mask_token" in name:
        name = name.replace("""mask_token""" , """decoder.mask_token""" )
    if "decoder_pos_embed" in name:
        name = name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" )
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("""pos_embed""" , """vit.embeddings.position_embeddings""" )
    if "patch_embed.proj" in name:
        name = name.replace("""patch_embed.proj""" , """vit.embeddings.patch_embeddings.projection""" )
    if "patch_embed.norm" in name:
        name = name.replace("""patch_embed.norm""" , """vit.embeddings.norm""" )
    if "decoder_blocks" in name:
        name = name.replace("""decoder_blocks""" , """decoder.decoder_layers""" )
    if "blocks" in name:
        name = name.replace("""blocks""" , """vit.encoder.layer""" )
    if "attn.proj" in name:
        name = name.replace("""attn.proj""" , """attention.output.dense""" )
    if "attn" in name:
        name = name.replace("""attn""" , """attention.self""" )
    if "norm1" in name:
        name = name.replace("""norm1""" , """layernorm_before""" )
    if "norm2" in name:
        name = name.replace("""norm2""" , """layernorm_after""" )
    if "mlp.fc1" in name:
        name = name.replace("""mlp.fc1""" , """intermediate.dense""" )
    if "mlp.fc2" in name:
        name = name.replace("""mlp.fc2""" , """output.dense""" )
    if "decoder_embed" in name:
        name = name.replace("""decoder_embed""" , """decoder.decoder_embed""" )
    if "decoder_norm" in name:
        name = name.replace("""decoder_norm""" , """decoder.decoder_norm""" )
    if "decoder_pred" in name:
        name = name.replace("""decoder_pred""" , """decoder.decoder_pred""" )
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("""norm.weight""" , """vit.layernorm.weight""" )
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("""norm.bias""" , """vit.layernorm.bias""" )
    return name
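# Example rename: "blocks.0.attn.proj.weight" -> "vit.encoder.layer.0.attention.output.dense.weight"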
def convert_state_dict( orig_state_dict , config ) -> List[Any]:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split(""".""" )
            layer_num = int(key_split[1] )
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = """decoder.decoder_layers."""
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
                elif "bias" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
            else:
                dim = config.hidden_size
                prefix = """vit.encoder.layer."""
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
                elif "bias" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def convert_vit_mae_checkpoint( checkpoint_url , pytorch_dump_folder_path ) -> int:
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1_280
        config.intermediate_size = 5_120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
    model = ViTMAEForPreTraining(config )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="""cpu""" )["""model"""]
    image_processor = ViTMAEImageProcessor(size=config.image_size )
    new_state_dict = convert_state_dict(state_dict , config )
    model.load_state_dict(new_state_dict )
    model.eval()
lowercase : Union[str, Any] = """https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"""
lowercase : Union[str, Any] = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw )
lowercase : Optional[Any] = ViTMAEImageProcessor(size=config.image_size )
lowercase : List[Any] = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" )
# forward pass
torch.manual_seed(2 )
lowercase : int = model(**SCREAMING_SNAKE_CASE__ )
lowercase : str = outputs.logits
if "large" in checkpoint_url:
lowercase : List[Any] = torch.tensor(
[[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]] )
elif "huge" in checkpoint_url:
lowercase : Tuple = torch.tensor(
[[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]] )
else:
lowercase : List[str] = torch.tensor(
[[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]] )
# verify logits
assert torch.allclose(logits[0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 )
print(f"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 20 | 0 |
"""simple docstring"""
def solution( n: int = 1000000 ) -> int:
    '''simple docstring'''
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for inputa in range(2 , n ):
        counter = 0
        number = inputa
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if inputa not in counters:
            counters[inputa] = counter
        if counter > pre_counter:
            largest_number = inputa
            pre_counter = counter
    return largest_number
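# Example: solution(1000000) == 837799, the classic Project Euler 14 answer.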
if __name__ == "__main__":
print(solution(int(input().strip())))
| 361 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__A = logging.get_logger(__name__)
__A = {
'''ut/deta''': '''https://huggingface.co/ut/deta/resolve/main/config.json''',
}
class DetaConfig(PretrainedConfig ):
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__( self , backbone_config=None , num_queries=900 , max_position_embeddings=2048 , encoder_layers=6 , encoder_ffn_dim=2048 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=1024 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.0_2 , init_xavier_std=1.0 , return_intermediate=True , auxiliary_loss=False , position_embedding_type="sine" , num_feature_levels=5 , encoder_n_points=4 , decoder_n_points=4 , two_stage=True , two_stage_num_proposals=300 , with_box_refine=True , assign_first_stage=True , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , focal_alpha=0.2_5 , **kwargs , ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"] )
        else:
            if isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.pop("model_type" )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True." )
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
    @property
    def num_attention_heads( self ) -> int:
        return self.encoder_attention_heads
    @property
    def hidden_size( self ) -> int:
        return self.d_model
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
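# Usage sketch: serialization nests the backbone config as a plain dict, e.g.
# DetaConfig().to_dict()["backbone_config"]["model_type"] == "resnet" for the default backbone.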
| 64 | 0 |
def heaps( arr ):
    '''simple docstring'''
    if len(arr ) <= 1:
        return [tuple(arr )]
    res = []
    def generate(n, arr ):
        c = [0] * n
        res.append(tuple(arr ) )
        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0] , arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]] , arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr ) )
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1
    generate(len(arr ), arr )
    return res
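# Example: heaps([1, 2, 3]) yields all 3! = 6 orderings, starting (1, 2, 3), (2, 1, 3), ...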
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    arr = [int(item) for item in user_input.split(',')]
print(heaps(arr))
| 88 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase : List[Any] = logging.get_logger()
@dataclass
class lowercase :
    module : nn.Module
    traced : List[nn.Module] = field(default_factory=list )
    handles : list = field(default_factory=list )
    def _forward_hook( self , m , inputs , outputs ):
        has_not_submodules = len(list(m.modules() ) ) == 1 or isinstance(m , nn.Conv2d ) or isinstance(m , nn.BatchNorm2d )
        if has_not_submodules:
            self.traced.append(m )
def __call__( self , snake_case ):
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(snake_case )
[x.remove() for x in self.handles]
return self
@property
def a ( self ):
# check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class lowercase :
    src : nn.Module
    dest : nn.Module
    verbose : int = 0
    src_skip : List = field(default_factory=list )
    dest_skip : List = field(default_factory=list )
    def __call__( self , x ):
        dest_traced = Tracker(self.dest )(x ).parametrized
        src_traced = Tracker(self.src )(x ).parametrized
        src_traced = list(filter(lambda m : type(m ) not in self.src_skip , src_traced ) )
        dest_traced = list(filter(lambda m : type(m ) not in self.dest_skip , dest_traced ) )
        if len(dest_traced ) != len(src_traced ):
            raise Exception(
                F'''Numbers of operations are different. Source module has {len(src_traced )} operations while'''
                F''' destination module has {len(dest_traced )}.''' )
        for dest_m, src_m in zip(dest_traced , src_traced ):
            dest_m.load_state_dict(src_m.state_dict() )
            if self.verbose == 1:
                print(F'''Transfered from={src_m} to={dest_m}''' )
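# Usage sketch: copy weights layer-by-layer between two architecturally-aligned models, e.g.
# ModuleTransfer(src=timm_model, dest=hf_model)(torch.randn(1, 3, 224, 224))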
def convert_weight_and_push( name , config , save_directory , push_to_hub = True ):
    '''simple docstring'''
    print(F'''Converting {name}...''' )
    with torch.no_grad():
        from_model = timm.create_model(name , pretrained=True ).eval()
        our_model = ResNetForImageClassification(config ).eval()
        module_transfer = ModuleTransfer(src=from_model , dest=our_model )
        x = torch.randn((1, 3, 224, 224) )
        module_transfer(x )
    assert torch.allclose(from_model(x ) , our_model(x ).logits ), "The model logits don't match the original one."
    checkpoint_name = F'''resnet{"-".join(name.split("resnet" ) )}'''
    print(checkpoint_name )
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message='Add model' , use_temp_dir=True , )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message='Add image processor' , use_temp_dir=True , )
        print(F'''Pushed {checkpoint_name}''' )
def convert_weights_and_push( save_directory , model_name = None , push_to_hub = True ):
    '''simple docstring'''
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = 'huggingface/label-files'
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}
    ImageNetPreTrainedConfig = partial(ResNetConfig , num_labels=num_labels , id2label=idalabel , label2id=labelaid )
snake_case_ = {
'resnet18': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ),
'resnet26': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ),
'resnet34': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ),
'resnet50': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ),
'resnet101': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ),
'resnet152': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ),
}
    if model_name:
        convert_weight_and_push(model_name , names_to_config[model_name] , save_directory , push_to_hub )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name , config , save_directory , push_to_hub )
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help=(
"""The name of the model you wish to convert, it must be one of the supported resnet* architecture,"""
""" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=Path,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
default=True,
type=bool,
required=False,
help="""If True, push model and image processor to the hub.""",
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 285 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
},
'tokenizer_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
SPIECE_UNDERLINE = '▁'
class AlbertTokenizerFast(PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer
    def __init__( self ,vocab_file=None ,tokenizer_file=None ,do_lower_case=True ,remove_space=True ,keep_accents=False ,bos_token="[CLS]" ,eos_token="[SEP]" ,unk_token="<unk>" ,sep_token="[SEP]" ,pad_token="<pad>" ,cls_token="[CLS]" ,mask_token="[MASK]" ,**kwargs ,):
        """simple docstring"""
        mask_token = (
            AddedToken(mask_token ,lstrip=True ,rstrip=False ,normalized=False )
            if isinstance(mask_token ,str )
            else mask_token
        )
        super().__init__(
            vocab_file ,tokenizer_file=tokenizer_file ,do_lower_case=do_lower_case ,remove_space=remove_space ,keep_accents=keep_accents ,bos_token=bos_token ,eos_token=eos_token ,unk_token=unk_token ,sep_token=sep_token ,pad_token=pad_token ,cls_token=cls_token ,mask_token=mask_token ,**kwargs ,)
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self ,token_ids_a: List[int] ,token_ids_b: Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_b + sep
    def create_token_type_ids_from_sequences( self ,token_ids_a: List[int] ,token_ids_b: Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
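    # Illustrative layout: [CLS] A [SEP] -> token type ids all 0; [CLS] A [SEP] B [SEP] -> 0s for the
    # first segment and 1s for the second.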
    def save_vocabulary( self ,save_directory: str ,filename_prefix: Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file ,out_vocab_file )
        return (out_vocab_file,)
| 359 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_pix2struct': [
'PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Pix2StructConfig',
'Pix2StructTextConfig',
'Pix2StructVisionConfig',
],
'processing_pix2struct': ['Pix2StructProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_pix2struct'] = ['Pix2StructImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_pix2struct'] = [
'PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Pix2StructPreTrainedModel',
'Pix2StructForConditionalGeneration',
'Pix2StructVisionModel',
'Pix2StructTextModel',
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 302 | 0 |
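A minimal sketch of the lazy-import machinery the init above relies on; this is an assumed simplification for illustration, not the actual `_LazyModule` implementation:
import importlib
import types

class LazyModule(types.ModuleType):
    # Maps each exported name to its defining submodule; the submodule is only
    # imported the first time one of its names is accessed.
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        return getattr(module, attr)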
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export( model , model_args: tuple , output_path: Path , ordered_input_names , output_names , dynamic_axes , opset , use_external_data_format=False , ) -> None:
    output_path.parent.mkdir(parents=True , exist_ok=True )
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , use_external_data_format=use_external_data_format , enable_onnx_checker=True , opset_version=opset , )
    else:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , opset_version=opset , )
@torch.no_grad()
def convert_models( model_path: str , output_path: str , opset: int , fp16: bool = False ) -> None:
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = '''cuda'''
    elif fp16 and not torch.cuda.is_available():
        raise ValueError('''`float16` model export is only supported on GPUs with CUDA''' )
    else:
        device = '''cpu'''
    output_path = Path(output_path )
    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + '''/vae''' )
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder , model_args=(
            torch.randn(1 , vae_latent_channels , 25 , 25 ).to(device=device , dtype=dtype ),
            False,
        ) , output_path=output_path / '''vae_decoder''' / '''model.onnx''' , ordered_input_names=['''latent_sample''', '''return_dict'''] , output_names=['''sample'''] , dynamic_axes={
            '''latent_sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
        } , opset=opset , )
    del vae_decoder
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=14,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
    args = parser.parse_args()
print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print("SD: Done: ONNX")
| 0 |
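A possible smoke test for the exported decoder, assuming `onnxruntime` is installed and that `latent_sample` remained the only required graph input (the traced `return_dict` flag is usually constant-folded; 4 latent channels is an assumption typical of SD VAEs):
import numpy as np
import onnxruntime as ort

session = ort.InferenceSession("output/vae_decoder/model.onnx", providers=["CPUExecutionProvider"])
latent = np.random.randn(1, 4, 25, 25).astype(np.float32)
(sample,) = session.run(None, {"latent_sample": latent})
print(sample.shape)  # expect (1, 3, 200, 200): the decoder upsamples 8x per side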
'''simple docstring'''
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef
DEPRECATION_WARNING = (
    '''This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '''
    '''library. You can have a look at this example script for pointers: '''
    '''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'''
)
def simple_accuracy(preds , labels ) -> float:
    '''simple docstring'''
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(simple_accuracy , "sklearn" )
    return (preds == labels).mean()
def acc_and_f1(preds , labels ) -> dict:
    '''simple docstring'''
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(acc_and_f1 , "sklearn" )
    acc = simple_accuracy(preds , labels )
    f1 = f1_score(y_true=labels , y_pred=preds )
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }
def pearson_and_spearman(preds , labels ) -> dict:
    '''simple docstring'''
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(pearson_and_spearman , "sklearn" )
    pearson_corr = pearsonr(preds , labels )[0]
    spearman_corr = spearmanr(preds , labels )[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }
def glue_compute_metrics(task_name , preds , labels ) -> dict:
    '''simple docstring'''
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(glue_compute_metrics , "sklearn" )
    assert len(preds ) == len(labels ), f'Predictions and labels have mismatched lengths {len(preds )} and {len(labels )}'
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels , preds )}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "mrpc":
        return acc_and_f1(preds , labels )
    elif task_name == "sts-b":
        return pearson_and_spearman(preds , labels )
    elif task_name == "qqp":
        return acc_and_f1(preds , labels )
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds , labels )}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds , labels )}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds , labels )}
    else:
        raise KeyError(task_name )
def xnli_compute_metrics(task_name , preds , labels ) -> dict:
    '''simple docstring'''
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(xnli_compute_metrics , "sklearn" )
    if len(preds ) != len(labels ):
        raise ValueError(f'Predictions and labels have mismatched lengths {len(preds )} and {len(labels )}' )
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds , labels )}
    else:
        raise KeyError(task_name )
| 22 | 0 |
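A toy check of the helpers above with hypothetical arrays; accuracy is just the mean of elementwise matches, and `acc_and_f1` additionally averages in the F1 score:
import numpy as np
from sklearn.metrics import f1_score

preds = np.array([1, 0, 1, 1])
labels = np.array([1, 0, 0, 1])
acc = (preds == labels).mean()              # 0.75
f1 = f1_score(y_true=labels, y_pred=preds)  # 0.8
print({"acc": acc, "f1": f1, "acc_and_f1": (acc + f1) / 2})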
import os
def solution(filename: str = "input.txt" ) -> int:
    """simple docstring"""
    with open(os.path.join(os.path.dirname(__file__ ) , filename ) ) as input_file:
        matrix = [
            [int(element ) for element in line.split(''',''' )]
            for line in input_file.readlines()
        ]
    rows = len(matrix )
    cols = len(matrix[0] )
    minimal_path_sums = [[-1 for _ in range(cols )] for _ in range(rows )]
    for i in range(rows ):
        minimal_path_sums[i][0] = matrix[i][0]
    for j in range(1 , cols ):
        # first pass: enter each cell of the column from the left
        for i in range(rows ):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        # second pass: allow downward moves within the column
        for i in range(1 , rows ):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
        # third pass: allow upward moves within the column
        for i in range(rows - 2 , -1 , -1 ):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 315 |
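A hand-checkable instance of the three-direction path problem solved above (hypothetical 2x2 matrix, not the project's input file):
matrix = [[1, 9], [5, 1]]
paths = {
    "right from row 0": matrix[0][0] + matrix[0][1],                # 1 + 9 = 10
    "down then right": matrix[0][0] + matrix[1][0] + matrix[1][1],  # 1 + 5 + 1 = 7
    "right from row 1": matrix[1][0] + matrix[1][1],                # 5 + 1 = 6
    "up then right": matrix[1][0] + matrix[0][0] + matrix[0][1],    # 5 + 1 + 9 = 15
}
print(min(paths.values()))  # 6, which is what the column-by-column DP returns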
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    '''simple docstring'''
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , apply_ocr=True , ):
        """simple docstring"""
        size = size if size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr
    def prepare_image_processor_dict( self ):
        """simple docstring"""
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    '''simple docstring'''
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None
    def setUp( self ):
        """simple docstring"""
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , '''do_resize''' ) )
        self.assertTrue(hasattr(image_processing , '''size''' ) )
        self.assertTrue(hasattr(image_processing , '''apply_ocr''' ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        """simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
    def test_batch_feature( self ):
        """simple docstring"""
        pass
    def test_call_pil( self ):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoding = image_processing(image_inputs[0] , return_tensors='''pt''' )
        self.assertEqual(
            encoding.pixel_values.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
        self.assertIsInstance(encoding.words , list )
        self.assertIsInstance(encoding.boxes , list )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
    def test_call_numpy( self ):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
    def test_call_pytorch( self ):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
    def test_LayoutLMv3_integration_test( self ):
        """simple docstring"""
        image_processing = LayoutLMvaImageProcessor()
        from datasets import load_dataset
        ds = load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' )
        image = Image.open(ds[0]['''file'''] ).convert('''RGB''' )
        encoding = image_processing(image , return_tensors='''pt''' )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
        self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
UpperCamelCase : Union[str, Any] = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
UpperCamelCase : str = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 
803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __SCREAMING_SNAKE_CASE )
self.assertListEqual(encoding.boxes , __SCREAMING_SNAKE_CASE )
        # with apply_ocr = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False )
        encoding = image_processing(image , return_tensors='''pt''' )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 315 | 1 |
"""simple docstring"""
import math
def main() -> None:
    '''simple docstring'''
    message = input("""Enter message: """ )
    key = int(input(F"""Enter key [2-{len(message ) - 1}]: """ ) )
    mode = input("""Encryption/Decryption [e/d]: """ )
    if mode.lower().startswith("""e""" ):
        text = encrypt_message(key , message )
    elif mode.lower().startswith("""d""" ):
        text = decrypt_message(key , message )
    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(F"""Output:\n{text + '|'}""" )
def encrypt_message(key: int , message: str ) -> str:
    '''simple docstring'''
    # Column `col` collects every `key`-th character, starting at offset `col`.
    cipher_text = [""] * key
    for col in range(key ):
        pointer = col
        while pointer < len(message ):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text )
def decrypt_message(key: int , message: str ) -> str:
    '''simple docstring'''
    # Rebuild the encryption grid: `num_shaded_boxes` cells in the last column
    # were never filled, so the row wraps one cell early once we reach them.
    num_cols = math.ceil(len(message ) / key )
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message )
    plain_text = [""] * num_cols
    col = 0
    row = 0
    for symbol in message:
        plain_text[col] += symbol
        col += 1
        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1
    return "".join(plain_text )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 294 |
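A round-trip check for the cipher above, using the restored names `encrypt_message`/`decrypt_message`; because decryption reconstructs the exact column grid, every valid key should invert cleanly:
msg = "Hello World"
for key in range(2, len(msg)):
    assert decrypt_message(key, encrypt_message(key, msg)) == msg
print("round trip ok for keys 2 ..", len(msg) - 1)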
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process(position , value , l_send , r_send , lr_cv , rr_cv , result_pipe ) -> None:
    """simple docstring"""
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0 , 10 ):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value )
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value , temp )
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value )
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value , temp )
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value )
def odd_even_transposition(arr ) -> list:
    """simple docstring"""
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe() )
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
    temp_lr = temp_rs
    temp_ls = temp_rr
    for i in range(1 , len(arr ) - 1 ):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
        temp_lr = temp_rs
        temp_ls = temp_rr
    process_array_.append(
        Process(
            target=oe_process , args=(
                len(arr ) - 1,
                arr[len(arr ) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr ) - 1],
            ) , ) )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0 , len(result_pipe ) ):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main() -> None:
    """simple docstring"""
    arr = list(range(10 , 0 , -1 ) )
    print("""Initial List""" )
    print(*arr )
    arr = odd_even_transposition(arr )
    print("""Sorted List\n""" )
    print(*arr )
if __name__ == "__main__":
main()
| 274 | 0 |
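A usage sketch for the parallel sort above with a hypothetical input; note the fixed 10 swap rounds in `oe_process` only guarantee a sorted result for lists of length <= 10:
if __name__ == "__main__":
    data = [3, 1, 4, 1, 5, 9, 2, 6]
    print(odd_even_transposition(data))  # [1, 1, 2, 3, 4, 5, 6, 9]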
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv("""sample_data.csv""", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])
    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="""mean_squared_error""", optimizer="""adam""")
    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    pred = model.predict(x_test)
| 370 |
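A windowing sketch showing how the `look_back`/`forward_days` loops above pair inputs and targets (toy series, not the CSV data):
series = list(range(10))
look_back, forward_days = 3, 2
pairs = [
    (series[i : i + look_back], series[i + look_back : i + look_back + forward_days])
    for i in range(len(series) - look_back - forward_days + 1)
]
print(pairs[0])    # ([0, 1, 2], [3, 4])
print(len(pairs))  # 6 windows from a 10-point series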
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name ):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("""img_encoder.pos_embed""" , """vision_model.embeddings.position_embeddings""" )
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("""img_encoder.patch_embed.proj""" , """vision_model.embeddings.patch_embeddings.projection""" )
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("""img_encoder.patch_embed.norm""" , """vision_model.embeddings.layernorm""" )
    if "img_encoder.layers" in name:
        name = name.replace("""img_encoder.layers""" , """vision_model.encoder.stages""" )
    if "blocks" in name and "res" not in name:
        name = name.replace("""blocks""" , """layers""" )
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("""attn""" , """self_attn""" )
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("""proj""" , """out_proj""" )
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("""pre_assign_attn.attn.proj""" , """pre_assign_attn.attn.out_proj""" )
    if "norm1" in name:
        name = name.replace("""norm1""" , """layer_norm1""" )
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("""norm2""" , """layer_norm2""" )
    if "img_encoder.norm" in name:
        name = name.replace("""img_encoder.norm""" , """vision_model.layernorm""" )
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("""text_encoder.token_embedding""" , """text_model.embeddings.token_embedding""" )
    if "text_encoder.positional_embedding" in name:
        name = name.replace("""text_encoder.positional_embedding""" , """text_model.embeddings.position_embedding.weight""" )
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("""text_encoder.transformer.resblocks.""" , """text_model.encoder.layers.""" )
    if "ln_1" in name:
        name = name.replace("""ln_1""" , """layer_norm1""" )
    if "ln_2" in name:
        name = name.replace("""ln_2""" , """layer_norm2""" )
    if "c_fc" in name:
        name = name.replace("""c_fc""" , """fc1""" )
    if "c_proj" in name:
        name = name.replace("""c_proj""" , """fc2""" )
    if "text_encoder" in name:
        name = name.replace("""text_encoder""" , """text_model""" )
    if "ln_final" in name:
        name = name.replace("""ln_final""" , """final_layer_norm""" )
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("""img_projector.linear_hidden.""" , """visual_projection.""" )
    if "img_projector.linear_out." in name:
        name = name.replace("""img_projector.linear_out.""" , """visual_projection.3.""" )
    if "text_projector.linear_hidden" in name:
        name = name.replace("""text_projector.linear_hidden""" , """text_projection""" )
    if "text_projector.linear_out" in name:
        name = name.replace("""text_projector.linear_out""" , """text_projection.3""" )
    return name
def convert_state_dict(orig_state_dict , config ):
    # The target key templates below are reconstructed from the rename patterns in
    # rename_key above (vision_model.encoder.stages.*.layers.*.self_attn and
    # text_model.encoder.layers.*.self_attn).
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(""".""" )
            stage_num, layer_num = int(key_split[2] ), int(key_split[4] )
            dim = config.vision_config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.bias"
                ] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(""".""" )
            layer_num = int(key_split[3] )
            dim = config.text_config.hidden_size
            if "weight" in key:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key )
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val
    return orig_state_dict
def prepare_img():
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path , pytorch_dump_folder_path , model_name="groupvit-gcc-yfcc" , push_to_hub=False ) -> None:
    config = GroupViTConfig()
    model = GroupViTModel(config ).eval()
    state_dict = torch.load(checkpoint_path , map_location="""cpu""" )["""model"""]
    new_state_dict = convert_state_dict(state_dict , config )
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict , strict=False )
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys ) == 0)
    # verify result
    processor = CLIPProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
    image = prepare_img()
    inputs = processor(text=["""a photo of a cat""", """a photo of a dog"""] , images=image , padding=True , return_tensors="""pt""" )
    with torch.no_grad():
        outputs = model(**inputs )
    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]] )
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]] )
    else:
        raise ValueError(f"Model name {model_name} not supported." )
    assert torch.allclose(outputs.logits_per_image , expected_logits , atol=1e-3 )
    processor.save_pretrained(pytorch_dump_folder_path )
    model.save_pretrained(pytorch_dump_folder_path )
    print("""Successfully saved processor and model to""" , pytorch_dump_folder_path )
    if push_to_hub:
        print("""Pushing to the hub...""" )
        processor.push_to_hub(model_name , organization="""nielsr""" )
        model.push_to_hub(model_name , organization="""nielsr""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""")
parser.add_argument(
"""--model_name""",
default="""groupvit-gccy-fcc""",
type=str,
help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""",
)
    args = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 285 | 0 |
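A minimal sketch (assumed shapes) of the fused-QKV split performed in `convert_state_dict` above: a (3*dim, dim) projection slices into equal query/key/value blocks along dim 0:
import torch

dim = 4
qkv_weight = torch.randn(3 * dim, dim)
q = qkv_weight[:dim, :]
k = qkv_weight[dim : dim * 2, :]
v = qkv_weight[-dim:, :]
assert q.shape == k.shape == v.shape == (dim, dim)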
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.14.0""", """To fix: pip install -r examples/pytorch/audio-classification/requirements.txt""")
def random_subsample(wav: np.ndarray , max_length: float , sample_rate: int = 16_000 ) -> np.ndarray:
    """Randomly sample chunks of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length ) )
    if len(wav ) <= sample_length:
        return wav
    random_offset = randint(0 , len(wav ) - sample_length - 1 )
    return wav[random_offset : random_offset + sample_length]
@dataclass
class DataTrainingArguments:
    '''simple docstring'''
    dataset_name: Optional[str] = field(default=None , metadata={"help": "Name of a dataset from the datasets package"} )
    dataset_config_name: Optional[str] = field(
        default=None , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
    train_file: Optional[str] = field(
        default=None , metadata={"help": "A file containing the training audio paths and labels."} )
    eval_file: Optional[str] = field(
        default=None , metadata={"help": "A file containing the validation audio paths and labels."} )
    train_split_name: str = field(
        default="train" , metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        } , )
    eval_split_name: str = field(
        default="validation" , metadata={
            "help": (
                "The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
            )
        } , )
    audio_column_name: str = field(
        default="audio" , metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"} , )
    label_column_name: str = field(
        default="label" , metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"} )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        } , )
    max_length_seconds: float = field(
        default=20 , metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."} , )
@dataclass
class ModelArguments:
    '''simple docstring'''
    model_name_or_path: str = field(
        default="facebook/wav2vec2-base" , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} , )
    config_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"} )
    model_revision: str = field(
        default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
    feature_extractor_name: Optional[str] = field(
        default=None , metadata={"help": "Name or path of preprocessor config."} )
    freeze_feature_encoder: bool = field(
        default=True , metadata={"help": "Whether to freeze the feature encoder layers of the model."} )
    attention_mask: bool = field(
        default=True , metadata={"help": "Whether to generate an attention mask in the feature extractor."} )
    use_auth_token: bool = field(
        default=False , metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        } , )
    freeze_feature_extractor: Optional[bool] = field(
        default=None , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
    ignore_mismatched_sizes: bool = field(
        default=False , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , )
    def __post_init__( self ):
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder`"
                "instead. Setting `freeze_feature_encoder==True`." , FutureWarning , )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`."
                "Only make use of `--freeze_feature_encoder`." )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification" , model_args , data_args )
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """
        + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}""" )
    logger.info(f"""Training/evaluation parameters {training_args}""" )
    # Set seed before initializing model.
    set_seed(training_args.seed )
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
                "Use --overwrite_output_dir to train from scratch." )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
    # Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
    if data_args.audio_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"""--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. """
            "Make sure to set `--audio_column_name` to the correct audio column - one of "
            f"""{', '.join(raw_datasets['train'].column_names )}.""" )
    if data_args.label_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"""--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. """
            "Make sure to set `--label_column_name` to the correct text column - one of "
            f"""{', '.join(raw_datasets['train'].column_names )}.""" )
    # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
    # transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
    model_input_name = feature_extractor.model_input_names[0]
    def train_transforms(batch ):
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
            subsampled_wavs.append(wav )
        inputs = feature_extractor(subsampled_wavs , sampling_rate=feature_extractor.sampling_rate )
        output_batch = {model_input_name: inputs.get(model_input_name )}
        output_batch["labels"] = list(batch[data_args.label_column_name] )
        return output_batch
    def val_transforms(batch ):
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs , sampling_rate=feature_extractor.sampling_rate )
        output_batch = {model_input_name: inputs.get(model_input_name )}
        output_batch["labels"] = list(batch[data_args.label_column_name] )
        return output_batch
    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels ):
        label2id[label] = str(i )
        id2label[str(i )] = label
    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy" )
    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred ):
        predictions = np.argmax(eval_pred.predictions , axis=1 )
        return metric.compute(predictions=predictions , references=eval_pred.label_ids )
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path , num_labels=len(labels ) , label2id=label2id , id2label=id2label , finetuning_task="audio-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
    # freeze the convolutional waveform encoder
    if model_args.freeze_feature_encoder:
        model.freeze_feature_encoder()
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms , output_all_columns=False )
    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms , output_all_columns=False )
    # Initialize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=raw_datasets["train"] if training_args.do_train else None , eval_dataset=raw_datasets["eval"] if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=feature_extractor , )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()
        trainer.log_metrics("train" , train_result.metrics )
        trainer.save_metrics("train" , train_result.metrics )
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval" , metrics )
        trainer.save_metrics("eval" , metrics )
    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
if __name__ == "__main__":
main()
| 31 |
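Behavior sketch for `random_subsample` above with hypothetical numbers: a 5 s clip at 16 kHz is cut to a random 2 s window:
import numpy as np
from random import randint

wav = np.zeros(5 * 16_000)
sample_rate, max_length = 16_000, 2.0
sample_length = int(round(sample_rate * max_length))
offset = randint(0, len(wav) - sample_length - 1)
print(wav[offset : offset + sample_length].shape)  # (32000,)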
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/encodec_24khz""": """https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json""",
"""facebook/encodec_48khz""": """https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json""",
}
class EncodecConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = "encodec"
    def __init__( self , target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0] , sampling_rate=24000 , audio_channels=1 , normalize=False , chunk_length_s=None , overlap=None , hidden_size=128 , num_filters=32 , num_residual_layers=1 , upsampling_ratios=[8, 5, 4, 2] , norm_type="weight_norm" , kernel_size=7 , last_kernel_size=7 , residual_kernel_size=3 , dilation_growth_rate=2 , use_causal_conv=True , pad_mode="reflect" , compress=2 , num_lstm_layers=2 , trim_right_ratio=1.0 , codebook_size=1024 , codebook_dim=None , use_conv_shortcut=True , **kwargs , ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut
        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}""" )
        super().__init__(**kwargs )
    @property
    def chunk_length( self ):
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate )
    @property
    def chunk_stride( self ):
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
    @property
    def frame_rate( self ):
        hop_length = np.prod(self.upsampling_ratios )
        return math.ceil(self.sampling_rate / hop_length )
    @property
    def num_quantizers( self ):
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
| 31 | 1 |
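Worked numbers for the derived properties above, using the 24 kHz defaults: the hop length is the product of the upsampling ratios, giving a 75 Hz frame rate and 32 quantizers at the top 24.0 kbps bandwidth:
import math

hop_length = 8 * 5 * 4 * 2                     # prod(upsampling_ratios) = 320
frame_rate = math.ceil(24_000 / hop_length)
print(frame_rate)                              # 75
print(int(1000 * 24.0 // (frame_rate * 10)))   # 32 quantizers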
"""simple docstring"""
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
_CITATION = '\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n'
_DESCRIPTION = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n'
_KWARGS_DESCRIPTION = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n    beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n    gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n    \'meteor\': meteor score.\nExamples:\n\n    >>> meteor = datasets.load_metric(\'meteor\')\n    >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n    >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n    >>> results = meteor.compute(predictions=predictions, references=references)\n    >>> print(round(results["meteor"], 4))\n    0.6944\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Meteor(datasets.Metric ):
"""simple docstring"""
    def _info( self ):
        '''simple docstring'''
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    'predictions': datasets.Value('string' , id='sequence' ),
                    'references': datasets.Value('string' , id='sequence' ),
                } ) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[
                'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score',
                'https://en.wikipedia.org/wiki/METEOR',
            ] , )
    def _download_and_prepare( self , dl_manager ):
        '''simple docstring'''
        import nltk
        nltk.download('wordnet' )
        if NLTK_VERSION >= version.Version('3.6.5' ):
            nltk.download('punkt' )
        if NLTK_VERSION >= version.Version('3.6.6' ):
            nltk.download('omw-1.4' )
    def _compute( self , predictions , references , alpha=0.9 , beta=3 , gamma=0.5 ):
        '''simple docstring'''
        # Newer nltk versions tokenize internally only if given tokenized input,
        # so we word-tokenize before scoring when supported.
        if NLTK_VERSION >= version.Version('3.6.5' ):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref ) , word_tokenize(pred ) , alpha=alpha , beta=beta , gamma=gamma )
                for ref, pred in zip(references , predictions )
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref , pred , alpha=alpha , beta=beta , gamma=gamma )
                for ref, pred in zip(references , predictions )
            ]
        return {"meteor": np.mean(scores )}
| 356 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    """Output of the prior transformer: the predicted CLIP image embedding."""

    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
    """A prior transformer that predicts CLIP image embeddings from text embeddings."""
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings=77,
        additional_embeddings=4,
        dropout: float = 0.0,
        time_embed_act_fn: str = "silu",
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = "linear",
        added_emb_type: Optional[str] = "prd",
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)
        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.")

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    activation_fn="gelu",
                    attention_bias=True,
                )
                for d in range(num_layers)
            ]
        )

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")

        self.norm_out = nn.LayerNorm(inner_dim)
        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        # causal mask: each token may only attend to itself and earlier tokens
        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0)
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        """Returns all attention processors used in the model, indexed by weight name."""
        processors = {}

        def fn_recursive_add_processors(name, module, processors):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)
        return processors

    def set_attn_processor(self, processor):
        count = len(self.attn_processors.keys())
        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes.")

        def fn_recursive_attn_processor(name, module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())
    def forward(
        self,
        hidden_states,
        timestep,
        proj_embedding,
        encoder_hidden_states=None,
        attention_mask=None,
        return_dict=True,
    ):
        batch_size = hidden_states.shape[0]
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)
        timesteps_projected = self.time_proj(timesteps)

        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)
        proj_embeddings = self.embedding_proj(proj_embedding)

        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set')

        hidden_states = self.proj_in(hidden_states)
        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

        additional_embeds = []
        additional_embeddings_len = 0
        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]
        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]
        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]
        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]
        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)
        hidden_states = torch.cat(additional_embeds, dim=1)

        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings, (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ), value=0.0, )

        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)
        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)
        hidden_states = self.norm_out(hidden_states)

        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]
        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)

        if not return_dict:
            return (predicted_image_embedding,)
        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)

    def post_process_latents(self, prior_latents):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
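# Minimal shape-check sketch (not part of the model file): build a tiny prior
# and run one forward pass. All dimensions below are made up for the example;
# real checkpoints use the defaults in __init__.
if __name__ == "__main__":
    prior = PriorTransformer(
        num_attention_heads=2, attention_head_dim=8, num_layers=2, embedding_dim=16, num_embeddings=7
    )
    batch, dim = 2, 16
    latents = torch.randn(batch, dim)  # current noisy image embedding
    proj_embedding = torch.randn(batch, dim)  # pooled text embedding
    encoder_hidden_states = torch.randn(batch, 7, dim)  # per-token text embeddings
    out = prior(latents, 10, proj_embedding, encoder_hidden_states=encoder_hidden_states)
    print(out.predicted_image_embedding.shape)  # torch.Size([2, 16])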
| 348 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class AlignProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list with a single random PIL image."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding="max_length", max_length=64)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 66 |
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19  # units = C


def electric_conductivity(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
    """Given two of (conductivity, electron_conc, mobility), with the unknown
    passed as 0, solve sigma = n * e * mu for the missing quantity."""
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif conductivity < 0:
raise ValueError('''Conductivity cannot be negative''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative''' )
elif mobility < 0:
raise ValueError('''mobility cannot be negative''' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
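# Worked example: for copper, n is roughly 8.5e28 electrons/m^3 and
# mu is roughly 4.5e-3 m^2/(V*s), so sigma = n * e * mu comes out near
# 6.1e7 S/m. Passing conductivity=0 asks the function to solve for it.
if __name__ == "__main__":
    print(electric_conductivity(conductivity=0, electron_conc=8.5e28, mobility=4.5e-3))
    # ('conductivity', 61280325.0) approximately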
| 101 | 0 |
def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return the price after applying the given tax rate."""
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(f'''{price_plus_tax(100, 0.25) = }''')
print(f'''{price_plus_tax(125.50, 0.05) = }''')
| 308 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
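# Usage sketch: build a default config and inspect the ONNX export settings.
# YolosOnnxConfig receives the model config through the OnnxConfig base-class
# constructor (signature assumed from the transformers.onnx base class).
if __name__ == "__main__":
    config = YolosConfig()
    print(config.model_type, config.hidden_size, config.num_detection_tokens)  # yolos 768 100
    onnx_config = YolosOnnxConfig(config)
    print(dict(onnx_config.inputs), onnx_config.default_onnx_opset)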
| 308 | 1 |
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1000, 1000) for i in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """Brute force: check every 3-permutation of the array. O(n^3)."""
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """Sort once, then scan with two pointers. O(n^2)."""
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    setup_code = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n"
    test_code1 = "\ntriplet_sum1(*dataset)\n"
    test_code2 = "\ntriplet_sum2(*dataset)\n"
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000)
    return (min(times1), min(times2))
if __name__ == "__main__":
from doctest import testmod
testmod()
    times = solution_times()
print(F"""The time for naive implementation is {times[0]}.""")
print(F"""The time for optimized implementation is {times[1]}.""")
| 140 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
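# Usage sketch of the lazy-import pattern above: replacing the module in
# sys.modules with a _LazyModule means an import such as
#
#   from transformers.models.nllb import NllbTokenizer
#
# (path assumed; this file would be the package's __init__.py) only imports
# tokenization_nllb, and pays the sentencepiece import cost, on first access.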
| 231 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    """Image processor with CLIP-style resize / center-crop / rescale / normalize steps."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name='crop_size')

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge matches `size["shortest_edge"]`, keeping the aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}')
        output_size = get_resize_output_image_size(image, size=size['shortest_edge'], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}')
        return center_crop(image, size=(size['height'], size['width']), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Rescale pixel values by `scale` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize with per-channel `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name='size', default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='crop_size', default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.')
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
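# Usage sketch: run the full pipeline on a random image and check the output
# layout. With the defaults above this resizes the shortest edge to 224,
# center-crops to 224x224, rescales to [0, 1] and normalizes with the CLIP
# mean/std.
if __name__ == "__main__":
    processor = CLIPImageProcessor()
    image = np.random.randint(0, 256, size=(300, 400, 3), dtype=np.uint8)
    batch = processor(images=image, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 224, 224)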
| 353 |
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''adapter_layer''': '''encoder.layers.*.adapter_layer''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
'''pooling_layer.linear''': '''projector''',
'''pooling_layer.projection''': '''classifier''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''projector''',
'''classifier''',
]
def read_txt_into_dict(filename):
    """Read a label file into a {line_number: first_word} dict."""
    result = {}
    with open(filename, 'r') as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split('.')[-1]]
            weight_type = 'param'
    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split('.'):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f' {value.shape} for {full_name}')
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split('.'):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split('.')[-1]]
            weight_type = 'param'
    if weight_type is not None and weight_type != "param":
        full_key = '.'.join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = '.'.join([key, hf_param_name])
    else:
        full_key = key
    hf_dict[full_key] = value if 'lm_head' in full_key else value[0]
PARAM_MAPPING = {
'''W_a''': '''linear_1.weight''',
'''W_b''': '''linear_2.weight''',
'''b_a''': '''linear_1.bias''',
'''b_b''': '''linear_2.bias''',
'''ln_W''': '''norm.weight''',
'''ln_b''': '''norm.bias''',
}
def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = 'wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split('.')[-2]
                mapped_key = mapped_key.replace('*', layer_index)
            if "weight_g" in name:
                weight_type = 'weight_g'
            elif "weight_v" in name:
                weight_type = 'weight_v'
            elif "bias" in name:
                weight_type = 'bias'
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = 'weight'
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group', )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)
    logger.warning(f'Unused weights: {unused_weights}')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.')
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.')
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.')
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.')
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    """Copy/paste/tweak the fairseq model's weights into the transformers design."""
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=True, )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, 'vocab.json')
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, 'w', encoding='utf-8') as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token='|', do_lower_case=False, )
            return_attention_mask = True if config.feat_extract_norm == 'layer' else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/')[:-1])})
    else:
        task_arg = argparse.Namespace(task='audio_pretraining')
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
parser.add_argument(
'''--is_seq_class''',
action='''store_true''',
help='''Whether the model to convert is a fine-tuned sequence classification model or not''',
)
    args = parser.parse_args()

    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
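# Example invocation sketch (all paths are placeholders, and the script file
# name is assumed): convert a fine-tuned fairseq checkpoint into a Hugging
# Face model folder together with its tokenizer and feature extractor.
#
#   python convert_wav2vec2_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/wav2vec_small_960h.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./wav2vec2-base-960h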
| 275 | 0 |
import os
def solution():
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, 'triangle.txt')

    with open(triangle_path) as f:
        triangle = f.readlines()

    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(' '):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)

    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])
if __name__ == "__main__":
print(solution())
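# Tiny self-contained check of the same DP recurrence on an inline 3-row
# triangle (so it does not depend on triangle.txt): each cell accumulates the
# best path sum from the row above.
def _max_path_sum(rows: list[list[int]]) -> int:
    a = [row[:] for row in rows]
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])


if __name__ == "__main__":
    assert _max_path_sum([[3], [7, 4], [2, 4, 6]]) == 3 + 7 + 4  # path 3 -> 7 -> 4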
| 259 |
from math import factorial
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError('Parameter number must be int')
    if number < 0:
        raise ValueError('Parameter number must be greater than or equal to 0')

    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1000000) -> int:
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError('Parameters chain_length and number_limit must be int')
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            'Parameters chain_length and number_limit must be greater than 0')

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater then the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{solution()}''')
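# Worked example of the chain step: digit_factorial_sum(169) = 1! + 6! + 9!
# = 1 + 720 + 362880 = 363601, and 169 -> 363601 -> 1454 -> 169 is the
# well-known loop of length 3 from Project Euler problem 74.
if __name__ == "__main__":
    assert digit_factorial_sum(169) == 363601
    assert digit_factorial_sum(363601) == 1454
    assert digit_factorial_sum(1454) == 169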
| 259 | 1 |
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    """Split a scikit-learn dataset bunch into (features, target)."""
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    """Fit an XGBoost regressor and return its predictions on the test set."""
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    # Load California house price dataset
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1)
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 242 |
import jax.numpy as jnp

from ...utils import logging
from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


def shift_tokens_right(input_ids: jnp.array, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    """Shift input ids one token to the right."""
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)

    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids


class FlaxMT5Model(FlaxT5Model):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5EncoderModel(FlaxT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config
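# Usage sketch for shift_tokens_right: the decoder start token is prepended,
# the last position dropped, and -100 labels (ignored by the loss) replaced
# with the pad token id. The token ids below are made up.
if __name__ == "__main__":
    ids = jnp.array([[5, 6, 7, -100]])
    print(shift_tokens_right(ids, pad_token_id=0, decoder_start_token_id=2))
    # [[2 5 6 7]]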
| 242 | 1 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 124 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1002)
    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
    @cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
            pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
            0, 3293, 83, 10, 4552, 4989, 7986, 678, 10, 5915, 111, 179459, 124850, 4, 6044, 237, 12, 6, 5, 6, 4,
            6780, 705, 15, 1388, 44, 378, 10114, 711, 152, 20, 6, 5, 22376, 642, 1221, 15190, 34153, 450, 5608,
            959, 1119, 57702, 136, 186, 47, 1098, 29367, 47,
            # 4426,  # What fairseq tokenizes from "<unk>": "_<"
            # 3678,  # What fairseq tokenizes from "<unk>": "unk"
            # 2740,  # What fairseq tokenizes from "<unk>": ">"
            3,  # What we tokenize from "<unk>": "<unk>"
            6,  # Residue from the tokenization: an extra sentencepiece underline
            4, 6044, 237, 6284, 50901, 528, 31, 90, 34, 927, 2,
        ]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self):
# fmt: off
snake_case : Tuple = {"""input_ids""": [[0, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [0, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=snake_case,
            model_name="xlm-roberta-base",
            revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3",
        )
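

# Illustrative standalone sketch (an addition, not part of the original test file):
# the @slow tests above pin exact token ids; the easy-symbols case can be reproduced
# directly, assuming network access to download the "xlm-roberta-base" checkpoint.
if __name__ == "__main__":
    from transformers import XLMRobertaTokenizer

    tok = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
    assert tok.encode("Hello World!") == [0, 35378, 6661, 38, 2]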
from dataclasses import dataclass
from typing import Optional

import numpy as np
import torch
import torch.nn as nn

from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block


@dataclass
class DecoderOutput(BaseOutput):
    """Output of a decoding method: the decoded sample from the last layer of the model."""

    sample: torch.FloatTensor


class Encoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)

            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample


class Decoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        norm_type="group",  # group, spatial
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=temb_channels,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=temb_channels,
                resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample


class VectorQuantizer(nn.Module):
    """
    Discretization bottleneck of a VQ-VAE: maps each spatial feature vector to its
    nearest codebook entry. Mostly avoids costly matrix multiplications and allows
    for post-hoc remapping of indices.
    """

    def __init__(
        self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True
    ):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients (straight-through estimator)
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q


class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator: Optional[torch.Generator] = None) -> torch.FloatTensor:
        # make sure sample is on the same device as the parameters and has the same dtype
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
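

# --- Illustrative sketch (an addition, not part of the original module) ---
# The classes above compose into the usual VAE round trip: Encoder -> moments ->
# DiagonalGaussianDistribution -> sampled latent -> Decoder. A minimal smoke test,
# assuming the module is executed inside the diffusers package so the relative
# imports at the top resolve (e.g. `python -m diffusers.models.vae`):
if __name__ == "__main__":
    encoder = Encoder(in_channels=3, out_channels=4, block_out_channels=(32,), layers_per_block=1)
    decoder = Decoder(in_channels=4, out_channels=3, block_out_channels=(32,), layers_per_block=1)
    moments = encoder(torch.randn(1, 3, 32, 32))  # (1, 2 * 4, 32, 32): concatenated mean and logvar
    posterior = DiagonalGaussianDistribution(moments)  # chunks channels into mean / logvar halves
    reconstruction = decoder(posterior.sample())  # (1, 3, 32, 32)
    print(reconstruction.shape, posterior.kl().shape)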
import random


def rabin_miller(num: int) -> bool:
    """Probabilistic Miller-Rabin primality test with five random witnesses."""
    # Write num - 1 as 2**t * s with s odd.
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True


def is_prime_low_num(num: int) -> bool:
    """Trial division by the primes below 1000, falling back to Miller-Rabin."""
    if num < 2:
        return False
    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67,
        71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139,
        149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223,
        227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293,
        307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383,
        389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463,
        467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569,
        571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647,
        653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743,
        751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839,
        853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941,
        947, 953, 967, 971, 977, 983, 991, 997,
    ]
    if num in low_primes:
        return True
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return rabin_miller(num)


def generate_large_prime(keysize: int = 1024) -> int:
    """Return a random prime with `keysize` bits."""
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print("Prime number:", num)
    print("is_prime_low_num:", is_prime_low_num(num))
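    # Illustrative sanity check (an addition, not in the original script): the
    # Miller-Rabin path should accept known primes and the trial-division path
    # should reject even composites.
    assert is_prime_low_num(104729)  # 104729 is the 10000th prime
    assert not is_prime_low_num(104729 * 2)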
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel


@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and attention_mask
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)


def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)


@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow


if is_flax_available():
    import optax
    from flax.training.common_utils import onehot

    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
        # Scale the mean per-token loss back up to a summed negative log-likelihood,
        # comparable to the score reported by the original Mesh TensorFlow (mtf) code.
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
import numpy as np


def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
    """
    Power iteration: find the largest-magnitude eigenvalue of `input_matrix`
    and a corresponding eigenvector, starting from the given `vector`.
    """
    # Ensure matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find Rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector


def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation: get eigenvalues and eigenvectors using built-in
        # eigh (eigh is used for symmetric or Hermitian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is the eigenvector corresponding to the largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy give close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element-wise of each eigenvector,
        # as they are only unique up to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    test_power_iteration()
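
    # Illustrative extra check (an addition, not in the original file): for the
    # symmetric matrix [[2, 1], [1, 2]] the dominant eigenvalue is 3 with
    # eigenvector [1, 1] / sqrt(2), up to sign.
    eig_value, eig_vector = power_iteration(np.array([[2.0, 1.0], [1.0, 2.0]]), np.array([1.0, 0.0]))
    assert abs(eig_value - 3.0) < 1e-6
    assert np.allclose(np.abs(eig_vector), np.array([1.0, 1.0]) / np.sqrt(2), atol=1e-6)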
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path

from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError

from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test


sys.path.append(str(Path(__file__).parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402


config_common_kwargs = {
    "return_dict": False,
    "output_hidden_states": True,
    "output_attentions": True,
    "torchscript": True,
    "torch_dtype": "float16",
    "use_bfloat16": True,
    "tf_legacy_loss": True,
    "pruned_heads": {"a": 1},
    "tie_word_embeddings": False,
    "is_decoder": True,
    "cross_attention_hidden_size": 128,
    "add_cross_attention": True,
    "tie_encoder_decoder": True,
    "max_length": 50,
    "min_length": 3,
    "do_sample": True,
    "early_stopping": True,
    "num_beams": 3,
    "num_beam_groups": 3,
    "diversity_penalty": 0.5,
    "temperature": 2.0,
    "top_k": 10,
    "top_p": 0.7,
    "typical_p": 0.2,
    "repetition_penalty": 0.8,
    "length_penalty": 0.8,
    "no_repeat_ngram_size": 5,
    "encoder_no_repeat_ngram_size": 5,
    "bad_words_ids": [1, 2, 3],
    "num_return_sequences": 3,
    "chunk_size_feed_forward": 5,
    "output_scores": True,
    "return_dict_in_generate": True,
    "forced_bos_token_id": 2,
    "forced_eos_token_id": 3,
    "remove_invalid_values": True,
    "architectures": ["BertModel"],
    "finetuning_task": "translation",
    "id2label": {0: "label"},
    "label2id": {"label": "0"},
    "tokenizer_class": "BertTokenizerFast",
    "prefix": "prefix",
    "bos_token_id": 6,
    "pad_token_id": 7,
    "eos_token_id": 8,
    "sep_token_id": 9,
    "decoder_start_token_id": 10,
    "exponential_decay_length_penalty": (5, 1.01),
    "suppress_tokens": [0, 1],
    "begin_suppress_tokens": 2,
    "task_specific_params": {"translation": "some_params"},
    "problem_type": "regression",
}


@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-config-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-config")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("test-config", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)
        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})

        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)


class ConfigTestUtils(unittest.TestCase):
    def test_config_from_string(self):
        c = GPT2Config()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}"
        )
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")

    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"]
        )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(keys_with_defaults)}."
            )

    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")

        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert")
        self.assertIsNotNone(config)

    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json"
        )

    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]

        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)

    def test_repo_versioning_before(self):
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = "hf-internal-testing/test-two-configs"

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True
        )
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
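

# Illustrative sketch (an addition, not part of the original test file):
# `update_from_string`, exercised by `test_config_from_string` above, parses a
# comma-separated key=value string and coerces each value to the type of the
# existing attribute (int, float, bool, str, ...).
if __name__ == "__main__":
    demo_config = GPT2Config()
    demo_config.update_from_string("n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index")
    print(demo_config.n_embd, demo_config.resid_pdrop, demo_config.scale_attn_weights)  # 10 0.2 False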
from unittest import TestCase

from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset


class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
"""Convert Hubert checkpoint."""


import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    HubertConfig,
    HubertForCTC,
    HubertModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')


def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group"
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or (key.split("w2v_model.")[-1] == name.split(".")[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_hubert_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak the fairseq model's weights to the transformers design.
    """
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = HubertForCTC(config)
    else:
        hf_wav2vec = HubertModel(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_hubert_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
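
# Example invocation (an illustrative addition; the script name and the paths
# below are placeholders for your local files, not assets shipped with the repo):
#
#   python convert_hubert_checkpoint.py \
#       --checkpoint_path /path/to/fairseq/hubert.pt \
#       --pytorch_dump_folder_path /path/to/output_dir \
#       --not_finetuned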
"""simple docstring"""
from __future__ import annotations


def is_palindrome(n: int | str) -> bool:
    n = str(n)
    return n == n[::-1]


def solution(limit: int = 1000000) -> int:
    """Return the sum of all numbers below `limit` that are palindromic in base 10 and base 2."""
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total


if __name__ == "__main__":
    print(solution(int(str(input().strip()))))
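    # Illustrative check (an addition, not in the original): 585 is the classic
    # double-base palindrome -- 585 reversed is 585, and bin(585) == "0b1001001001".
    assert is_palindrome(585) and is_palindrome(bin(585).split("b")[1])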
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
import gc
import unittest

import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
    ClapTextConfig,
    ClapTextModelWithProjection,
    RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)

from diffusers import (
    AudioLDMPipeline,
    AutoencoderKL,
    DDIMScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "num_waveforms_per_prompt",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=(32, 64), class_embed_type="simple_projection", projection_class_embeddings_input_dim=32, class_embeddings_concat=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=1, out_channels=1, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, projection_dim=32,
        )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)

        vocoder_config = SpeechT5HifiGanConfig(
            model_in_dim=8, sampling_rate=16000, upsample_initial_channel=16, upsample_rates=[2, 2], upsample_kernel_sizes=[4, 4], resblock_kernel_sizes=[3, 7], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], normalize_before=False,
        )
        vocoder = SpeechT5HifiGan(vocoder_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components
def __UpperCAmelCase ( self , __a , __a=0 ):
'''simple docstring'''
if str(__a ).startswith('mps' ):
__a : List[str] = torch.manual_seed(__a )
else:
__a : Union[str, Any] = torch.Generator(device=__a ).manual_seed(__a )
__a : int = {
'prompt': 'A hammer hitting a wooden surface',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
}
return inputs
    def test_audioldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = audioldm_pipe.tokenizer(
            prompt, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = audioldm_pipe.text_encoder(text_inputs)
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt",
            )
            text_inputs = text_inputs["input_ids"].to(torch_device)

            text_embeds = audioldm_pipe.text_encoder(text_inputs)
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)

            embeds.append(text_embeds)

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "egg cracking"
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_num_waveforms_per_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = "A hammer hitting a wooden surface"

        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios
        assert audios.shape == (1, 256)

        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios
        assert audios.shape == (batch_size, 256)

        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios
        assert audios.shape == (num_waveforms_per_prompt, 256)

        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios
        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
    def test_audioldm_audio_length_in_s(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016

        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032
    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = ["hey"]

        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)

        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs

    def test_audioldm(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[77230:77240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315]
        )
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2

    def test_audioldm_lms(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[27780:27790]
        expected_slice = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2
| 27 |
def prefix_function(input_string: str) -> list:
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_string: str) -> int:
    return max(prefix_function(input_string))
if __name__ == "__main__":
import doctest
doctest.testmod()
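# Illustrative check (not part of the original snippet): for "aabcdaabc" the
# table ends in 4 because the suffix "aabc" matches the 4-character prefix.
if __name__ == "__main__":
    assert prefix_function("aabcdaabc") == [0, 1, 0, 0, 0, 1, 2, 3, 4]
    assert longest_prefix("aabcdaabc") == 4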
| 157 | 0 |
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel
def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m", "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "-c", "--caption", type=str, default="robotic cat with wings", help="Text used to generate images.",
    )
    parser.add_argument(
        "-n", "--images_num", type=int, default=4, help="How much images to generate.",
    )
    parser.add_argument(
        "-s", "--seed", type=int, default=42, help="Seed for random process.",
    )
    parser.add_argument(
        "-ci", "--cuda_id", type=int, default=0, help="cuda_id.",
    )
    args = parser.parse_args()
    return args
def image_grid(imgs, rows, cols):
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")

    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))

    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
def generate_images(pipeline, prompt="robotic cat with wings", guidance_scale=7.5, num_inference_steps=50, num_images_per_prompt=1, seed=42):
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps, generator=generator, num_images_per_prompt=num_images_per_prompt,
    ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images
args = parse_args()

# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")

pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '''{}.png'''.format('''_'''.join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, '''{}.png'''.format(idx + 1)))
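# Illustrative CLI invocation (script name and model path are placeholders):
#   python text2images.py -m ./sd-int8-model -c "robotic cat with wings" -n 4 -s 42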
| 355 |
class CircularQueue:
    def __init__(self, n: int) -> None:
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")

        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")

        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
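# Illustrative usage (not part of the original snippet): the buffer wraps
# around, so a queue of capacity 2 can be reused indefinitely.
if __name__ == "__main__":
    queue = CircularQueue(2)
    queue.enqueue("a").enqueue("b")
    assert queue.dequeue() == "a" and queue.first() == "b"
    queue.enqueue("c")
    assert len(queue) == 2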
| 78 | 0 |
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
# TF training parameters
USE_XLA = False
USE_AMP = False
def train_command_factory(args: Namespace):
    """
    Factory function used to instantiate training command from provided command line arguments.
    """
    return TrainCommand(args)
class TrainCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")

        train_parser.add_argument(
            "--train_data", type=str, required=True, help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",
        )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels."
        )
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts."
        )
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids."
        )
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)."
        )
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split", type=float, default=0.1, help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",
        )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on."
        )
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model."
        )
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)
    def __init__(self, args: Namespace):
        self.logger = logging.get_logger("transformers-cli/training")

        self.framework = "tf" if is_tf_available() else "torch"

        os.makedirs(args.output, exist_ok=True)
        self.output = args.output

        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id

        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError

        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data, column_label=args.column_label, column_text=args.column_text, column_id=args.column_id, skip_first_row=args.skip_first_row,
        )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data, column_label=args.column_label, column_text=args.column_text, column_id=args.column_id, skip_first_row=args.skip_first_row,
            )

        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon
    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        raise NotImplementedError

    def run_tf(self):
        self.pipeline.fit(
            self.train_dataset, validation_data=self.valid_dataset, validation_split=self.validation_split, learning_rate=self.learning_rate, adam_epsilon=self.adam_epsilon, train_batch_size=self.train_batch_size, valid_batch_size=self.valid_batch_size,
        )

        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
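# Illustrative invocation through the transformers CLI (data paths are placeholders):
#   transformers-cli train --train_data ./train.csv --task text_classification --output ./trained_model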
| 239 |
'''simple docstring'''
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    """simple docstring"""
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    """simple docstring"""
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False

    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
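# Illustrative run (not part of the original snippet): 0 marks a free cell and
# 1 a blocked cell; the solver prints the path it marked in `solutions`.
if __name__ == "__main__":
    example_maze = [
        [0, 1, 0],
        [0, 0, 0],
        [1, 1, 0],
    ]
    assert solve_maze(example_maze)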
| 104 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/nllb-moe-54B""": """https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json""",
}
class NllbMoeConfig(PretrainedConfig):
    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=128112, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.05, decoder_layerdrop=0.05, use_cache=True, is_encoder_decoder=True, activation_function="relu", d_model=1024, dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True, router_bias=False, router_dtype="float32", router_ignore_padding_tokens=False, num_experts=128, expert_capacity=64, encoder_sparse_step=4, decoder_sparse_step=4, router_z_loss_coef=0.001, router_aux_loss_coef=0.001, second_expert_policy="all", normalize_router_prob_before_dropping=False, batch_prioritized_routing=False, moe_eval_capacity_token_fraction=1.0, moe_token_dropout=0.2, pad_token_id=1, bos_token_id=0, eos_token_id=2, output_router_logits=False, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, **kwargs,
        )
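# Illustrative instantiation (not part of the original file): a scaled-down
# config for quick experiments; every keyword below overrides a default.
if __name__ == "__main__":
    config = NllbMoeConfig(vocab_size=1000, d_model=64, num_experts=4, expert_capacity=8)
    assert config.num_experts == 4 and config.router_dtype == "float32"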
| 224 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/nllb-moe-54B""": """https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json""",
}
class NllbMoeConfig(PretrainedConfig):
    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=128112, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.05, decoder_layerdrop=0.05, use_cache=True, is_encoder_decoder=True, activation_function="relu", d_model=1024, dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True, router_bias=False, router_dtype="float32", router_ignore_padding_tokens=False, num_experts=128, expert_capacity=64, encoder_sparse_step=4, decoder_sparse_step=4, router_z_loss_coef=0.001, router_aux_loss_coef=0.001, second_expert_policy="all", normalize_router_prob_before_dropping=False, batch_prioritized_routing=False, moe_eval_capacity_token_fraction=1.0, moe_token_dropout=0.2, pad_token_id=1, bos_token_id=0, eos_token_id=2, output_router_logits=False, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, **kwargs,
        )
| 224 | 1 |
'''simple docstring'''
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s""",
datefmt="""%m/%d/%Y %H:%M:%S""",
level=logging.INFO,
)
logger = logging.getLogger(__name__)


def git_log(folder_path: str):
    """
    Log commit info.
    """
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }

    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)


def init_gpu_params(params):
    """
    Handle single and multi-GPU / multi-node.
    """
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
    logger.info(PREFIX + "Node ID        : %i" % params.node_id)
    logger.info(PREFIX + "Local rank     : %i" % params.local_rank)
    logger.info(PREFIX + "World size     : %i" % params.world_size)
    logger.info(PREFIX + "GPUs per node  : %i" % params.n_gpu_per_node)
    logger.info(PREFIX + "Master         : %s" % str(params.is_master))
    logger.info(PREFIX + "Multi-node     : %s" % str(params.multi_node))
    logger.info(PREFIX + "Multi-GPU      : %s" % str(params.multi_gpu))
    logger.info(PREFIX + "Hostname       : %s" % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(
            init_method="env://",
            backend="nccl",
        )


def set_seed(args):
    """
    Set the random seed.
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
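# Illustrative call (not part of the original file): any object with `seed`
# and `n_gpu` attributes works, e.g. an argparse namespace.
if __name__ == "__main__":
    from types import SimpleNamespace

    set_seed(SimpleNamespace(seed=42, n_gpu=0))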
| 75 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
logger = get_logger(__name__)


class MockDownloadManager:
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_streaming = False

    def __init__(
        self, dataset_name: str, config: str, version: Union[Version, str], cache_dir: Optional[str] = None, use_local_dummy_data: bool = False, load_existing_dummy_data: bool = True, download_callbacks: Optional[List[Callable]] = None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None

    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])

    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name

        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}

    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict

    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list

    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
| 15 | 0 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester(object):
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=64, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, q_groups=2, k_groups=2, v_groups=2, post_attention_groups=2, intermediate_groups=4, output_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return SqueezeBertConfig(
            embedding_size=self.hidden_size, vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, attention_probs_dropout_prob=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, q_groups=self.q_groups, k_groups=self.k_groups, v_groups=self.v_groups, post_attention_groups=self.post_attention_groups, intermediate_groups=self.intermediate_groups, output_groups=self.output_groups,
        )
    def create_and_check_squeezebert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_squeezebert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_squeezebert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_squeezebert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_squeezebert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_squeezebert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SqueezeBertModel,
            "fill-mask": SqueezeBertForMaskedLM,
            "question-answering": SqueezeBertForQuestionAnswering,
            "text-classification": SqueezeBertForSequenceClassification,
            "token-classification": SqueezeBertForTokenClassification,
            "zero-shot": SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False

    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_classification_head(self):
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")

        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
| 354 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mvp"] = [
        "MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MvpForCausalLM",
        "MvpForConditionalGeneration",
        "MvpForQuestionAnswering",
        "MvpForSequenceClassification",
        "MvpModel",
        "MvpPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
    from .tokenization_mvp import MvpTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mvp_fast import MvpTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mvp import (
            MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
            MvpForCausalLM,
            MvpForConditionalGeneration,
            MvpForQuestionAnswering,
            MvpForSequenceClassification,
            MvpModel,
            MvpPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
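# Illustrative effect (not part of the original file): after the module is
# replaced by `_LazyModule`, importing this package stays cheap; a submodule
# such as `modeling_mvp` is only imported when an attribute like `MvpModel`
# is first accessed.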
| 193 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
),
}
class XLMRobertaConfig(PretrainedConfig):
    model_type = "xlm-roberta"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
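# Illustrative usage (not part of the original file): build a small config and
# inspect the dynamic axes the ONNX export will use for sequence tasks.
if __name__ == "__main__":
    config = XLMRobertaConfig(vocab_size=1000, hidden_size=64)
    onnx_config = XLMRobertaOnnxConfig(config, task="default")
    assert list(onnx_config.inputs.keys()) == ["input_ids", "attention_mask"]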
| 297 |
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
("time_embed.0.weight", "time_embedding.linear_1.weight"),
("time_embed.0.bias", "time_embedding.linear_1.bias"),
("time_embed.2.weight", "time_embedding.linear_2.weight"),
("time_embed.2.bias", "time_embedding.linear_2.bias"),
("input_blocks.0.0.weight", "conv_in.weight"),
("input_blocks.0.0.bias", "conv_in.bias"),
("out.0.weight", "conv_norm_out.weight"),
("out.0.bias", "conv_norm_out.bias"),
("out.2.weight", "conv_out.weight"),
("out.2.bias", "conv_out.bias"),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
("in_layers.0", "norm1"),
("in_layers.2", "conv1"),
("out_layers.0", "norm2"),
("out_layers.3", "conv2"),
("emb_layers.1", "time_emb_proj"),
("skip_connection", "conv_shortcut"),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks

    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
    """Map Diffusers UNet keys back to the original Stable Diffusion layout."""
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
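# Worked example of the mapping above (keys illustrative, no real weights needed): the
# Diffusers key "down_blocks.0.resnets.0.norm1.weight" first has its resnet part
# rewritten ("norm1" -> "in_layers.0") and then its layer prefix rewritten
# ("down_blocks.0.resnets.0." -> "input_blocks.1.0."), so it lands in the checkpoint
# as the original Stable Diffusion key "input_blocks.1.0.in_layers.0.weight".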
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
# (stable-diffusion, HF Diffusers)
("nin_shortcut", "conv_shortcut"),
("norm_out", "conv_norm_out"),
("mid.attn_1.", "mid_block.attentions.0."),
]
for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
# (stable-diffusion, HF Diffusers)
("norm.", "group_norm."),
("q.", "query."),
("k.", "key."),
("v.", "value."),
("proj_out.", "proj_attn."),
]
def reshape_weight_for_sd(w):
    # convert HF linear weights to SD conv2d weights
    return w.reshape(*w.shape, 1, 1)
def convert_vae_state_dict(vae_state_dict):
    """Map Diffusers VAE keys back to the original Stable Diffusion layout."""
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
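# Why reshape_weight_for_sd is needed (sketch): Diffusers stores the VAE mid-block
# attention projections as nn.Linear weights of shape (C, C), while the original SD
# VAE uses 1x1 nn.Conv2d weights of shape (C, C, 1, 1). Appending two singleton
# dimensions converts one layout to the other without changing any values:
#
#   w = torch.randn(512, 512)           # Linear weight
#   reshape_weight_for_sd(w).shape      # torch.Size([512, 512, 1, 1])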
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
("resblocks.", "text_model.encoder.layers."),
("ln_1", "layer_norm1"),
("ln_2", "layer_norm2"),
(".c_fc.", ".fc1."),
(".c_proj.", ".fc2."),
(".attn", ".self_attn"),
("ln_final.", "transformer.text_model.final_layer_norm."),
("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"),
("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}
def convert_text_enc_state_dict_v20(text_enc_dict):
    """Re-fuse the split q/k/v projections of an SD v2.x (OpenCLIP) text encoder."""
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)

    return new_state_dict
def convert_text_enc_state_dict(text_enc_dict):
    # v1.x text encoders already use the CLIP naming scheme, nothing to do
    return text_enc_dict
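# Note on the two text-encoder paths: SD v1.x ships the CLIP text encoder in the same
# layout Hugging Face uses, so convert_text_enc_state_dict is the identity. SD v2.x
# uses OpenCLIP, which keeps attention q/k/v fused in a single in_proj tensor, so
# convert_text_enc_state_dict_v20 gathers the three split projections and torch.cat's
# them back together (e.g. three (1024, 1024) weights -> one (3072, 1024) in_proj_weight).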
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    parser.add_argument(
        "--use_safetensors", action="store_true", help="Save weights use safetensors, default is ckpt."
    )

    args = parser.parse_args()

    assert args.model_path is not None, "Must provide a model path!"
    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"

    # Path for safetensors
    unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
    vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
    text_enc_path = osp.join(args.model_path, "text_encoder", "model.safetensors")

    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device="cpu")
    else:
        unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
        unet_state_dict = torch.load(unet_path, map_location="cpu")

    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device="cpu")
    else:
        vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
        vae_state_dict = torch.load(vae_path, map_location="cpu")

    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device="cpu")
    else:
        text_enc_path = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
        text_enc_dict = torch.load(text_enc_path, map_location="cpu")

    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}

    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}

    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_v20_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict

    if is_v20_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict)
        text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}

    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}

    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {"state_dict": state_dict}
        torch.save(state_dict, args.checkpoint_path)
| 187 | 0 |
"""simple docstring"""
from __future__ import annotations
def mean(nums: list) -> float:
    """
    >>> mean([3, 6, 9, 12])
    7.5
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 302 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline):
    """Unconditional image generation with the score-based SDE-VE scheme."""

    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, batch_size: int = 1, num_inference_steps: int = 2_000, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, output_type: Optional[str] = "pil", return_dict: bool = True, **kwargs) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
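# Minimal usage sketch (hedged: the checkpoint name is an assumption; any SDE-VE
# checkpoint such as "google/ncsnpp-celebahq-256" should work, and 2000 steps is
# slow without a GPU):
#
#   pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
#   image = pipe(num_inference_steps=2000).images[0]
#   image.save("sde_ve_sample.png")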
| 302 | 1 |
"""simple docstring"""
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property):
    """Descriptor that mimics @property but caches output in a member variable."""

    def __get__(self, obj, objtype=None):
        # See docs.python.org/3/howto/descriptor.html#properties
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
def strtobool(val):
    """Convert a string representation of truth to 1 (true) or 0 (false)."""
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}")
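# Examples (mirrors the behaviour of the old distutils.util.strtobool):
#
#   strtobool("YES")   -> 1
#   strtobool("off")   -> 0
#   strtobool("maybe") -> ValueError: invalid truth value 'maybe'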
def is_tensor(x):
    """Test whether `x` is a torch, tensorflow or jax tensor, or a numpy array."""
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x, tf.Tensor):
            return True

    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x, (jnp.ndarray, Tracer)):
            return True

    return isinstance(x, np.ndarray)
def _is_numpy(x):
    return isinstance(x, np.ndarray)


def is_numpy_array(x):
    return _is_numpy(x)


def _is_torch(x):
    import torch

    return isinstance(x, torch.Tensor)


def is_torch_tensor(x):
    return False if not is_torch_available() else _is_torch(x)


def _is_torch_device(x):
    import torch

    return isinstance(x, torch.device)


def is_torch_device(x):
    return False if not is_torch_available() else _is_torch_device(x)


def _is_torch_dtype(x):
    import torch

    if isinstance(x, str):
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)


def is_torch_dtype(x):
    return False if not is_torch_available() else _is_torch_dtype(x)


def _is_tensorflow(x):
    import tensorflow as tf

    return isinstance(x, tf.Tensor)


def is_tf_tensor(x):
    return False if not is_tf_available() else _is_tensorflow(x)


def _is_tf_symbolic_tensor(x):
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor


def is_tf_symbolic_tensor(x):
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x)


def _is_jax(x):
    import jax.numpy as jnp  # noqa: F811

    return isinstance(x, jnp.ndarray)


def is_jax_tensor(x):
    return False if not is_flax_available() else _is_jax(x)
def to_py_obj(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a python list."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj


def to_numpy(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj
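# Quick demo of the converters above using only numpy (no torch/tf/jax required):
#
#   >>> to_py_obj({"a": np.arange(3), "b": [np.float32(1.5)]})
#   {'a': [0, 1, 2], 'b': [1.5]}
#   >>> to_numpy([[1, 2], [3, 4]]).shape
#   (2, 2)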
class ModelOutput(OrderedDict):
    """Base class for model outputs: behaves like a dict and like a tuple of its non-None fields."""

    def __post_init__(self):
        class_fields = fields(self)

        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")

        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value)."
                            )
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        # Will raise a KeyException if needed
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self):
        """Convert self to a tuple containing all the attributes/keys that are not `None`."""
        return tuple(self[k] for k in self.keys())
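# Minimal sketch of the intended usage (subclasses are dataclasses; names illustrative):
#
#   from dataclasses import dataclass
#
#   @dataclass
#   class ToyOutput(ModelOutput):
#       logits: np.ndarray = None
#       hidden_states: np.ndarray = None
#
#   out = ToyOutput(logits=np.ones(2))
#   out.logits is out["logits"]   # attribute and dict access return the same object
#   out.to_tuple()                # only the non-None fields: (array([1., 1.]),)
#   out[0]                        # integer indexing goes through to_tuple()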
class ExplicitEnum(str, Enum):
    """Enum with a more explicit error message for missing values."""

    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"
        )


class PaddingStrategy(ExplicitEnum):
    """Possible values for the `padding` argument of tokenizer methods."""

    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"


class TensorType(ExplicitEnum):
    """Possible values for the `return_tensors` argument of tokenizer/processor methods."""

    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"
class ContextManagers:
    """Wrapper around `contextlib.ExitStack` that enters a collection of context managers."""

    def __init__(self, context_managers: List[ContextManager]):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)
def can_return_loss(model_class):
    """Check whether a given model class can return a loss."""
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True

    return False


def find_labels(model_class):
    """Find the label arguments used by a given model class."""
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def a_ ( lowerCamelCase , lowerCamelCase = "" , lowerCamelCase = "." ):
def _flatten_dict(lowerCamelCase , lowerCamelCase="" , lowerCamelCase="." ):
for k, v in d.items():
UpperCAmelCase__ = str(lowerCamelCase ) + delimiter + str(lowerCamelCase ) if parent_key else k
if v and isinstance(lowerCamelCase , lowerCamelCase ):
yield from flatten_dict(lowerCamelCase , lowerCamelCase , delimiter=lowerCamelCase ).items()
else:
yield key, v
return dict(_flatten_dict(lowerCamelCase , lowerCamelCase , lowerCamelCase ) )
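# Example:
#
#   flatten_dict({"a": 1, "b": {"c": 2, "d": {"e": 3}}})
#   -> {"a": 1, "b.c": 2, "b.d.e": 3}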
@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir: bool = False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def transpose(array, axes=None):
    """Framework-agnostic version of `np.transpose` for numpy, torch, tensorflow and jax tensors."""
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"Type not supported for transpose: {type(array)}.")


def reshape(array, newshape):
    """Framework-agnostic version of `np.reshape`."""
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"Type not supported for reshape: {type(array)}.")


def squeeze(array, axis=None):
    """Framework-agnostic version of `np.squeeze`."""
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array)}.")


def expand_dims(array, axis):
    """Framework-agnostic version of `np.expand_dims`."""
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}.")


def tensor_size(array):
    """Framework-agnostic number of elements in a tensor."""
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array)}.")
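# The five helpers above share one contract: dispatch on the tensor's framework and
# defer to the matching native op. A numpy-only smoke test (torch/tf/jax not needed):
#
#   x = np.zeros((2, 3, 1))
#   transpose(x).shape        # (1, 3, 2)
#   reshape(x, (6, 1)).shape  # (6, 1)
#   squeeze(x, axis=2).shape  # (2, 3)
#   expand_dims(x, 0).shape   # (1, 2, 3, 1)
#   tensor_size(x)            # 6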
def add_model_info_to_auto_map(auto_map, repo_id):
    """Prefix the entries of a given auto map with the repo_id they come from."""
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"

    return auto_map


def infer_framework(model_class):
    """Infer the framework ("pt", "tf" or "flax") of a model class by walking its MRO."""
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f"Could not infer framework from class {model_class}.")
| 98 |
"""simple docstring"""
import argparse
CUSTOM_JS_FILE = "docs/source/_static/js/custom.js"
def update_custom_js(version):
    """Update the version table hard-coded in the docs' custom.js file."""
    with open(CUSTOM_JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(CUSTOM_JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
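# Sketch of the effect on custom.js (version numbers illustrative): the stable-version
# line is rewritten in place, and one entry is appended just before the closing brace
# of the version table:
#
#   const stableVersion = "v4.30.0"
#   const versionMapping = {
#       ...
#       "v4.30.0": "v4.30.0",
#   }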
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
update_custom_js(args.version)
| 69 | 0 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin):
    """Wraps a Pix2Struct image processor and a T5 tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)

    def __call__(self, images=None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, max_patches: Optional[int] = 2048, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(images, return_tensors=return_tensors, max_patches=max_patches, **kwargs)
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs)

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)

            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
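# Minimal usage sketch (hedged: the checkpoint name is an assumption, any public
# Pix2Struct checkpoint should behave the same). The processor routes text to the
# tokenizer and images to the image processor, renaming the text fields to decoder_*
# so they feed the T5 decoder:
#
#   from transformers import Pix2StructProcessor
#   processor = Pix2StructProcessor.from_pretrained("google/pix2struct-base")
#   inputs = processor(images=image, text="A caption", return_tensors="pt")
#   # keys: flattened_patches, attention_mask, decoder_input_ids, decoder_attention_mask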
| 362 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_reduce_labels=False):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }
def prepare_semantic_single_inputs():
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])

    return image, map


def prepare_semantic_batch_inputs():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])

    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_segmentation_maps(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())

        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched
        encoding = image_processing(image_inputs, maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                2,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

    def test_reduce_labels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, map = prepare_semantic_single_inputs()
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)

        image_processing.do_reduce_labels = True
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
| 98 | 0 |
def count_divisors(n):
    n_divisors = 1
    i = 2

    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1

    if n > 1:
        n_divisors *= 2

    return n_divisors


def solution():
    """Returns the value of the first triangle number to have over five
    hundred divisors."""
    t_num = 1
    i = 1

    while True:
        i += 1
        t_num += i

        if count_divisors(t_num) > 500:
            break

    return t_num
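# Worked example: 28 = 2**2 * 7, so count_divisors(28) = (2 + 1) * (1 + 1) = 6
# (its divisors are 1, 2, 4, 7, 14, 28). 28 is also the first triangle number with
# more than five divisors, which is the toy case in the Project Euler 12 statement.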
if __name__ == "__main__":
print(solution())
| 42 |
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = '''
import os
'''
IMPORT_IN_FUNCTION = '''
def foo():
import os
return False
'''
DEEPLY_NESTED_IMPORT = '''
def foo():
def bar():
if True:
import os
return False
return bar()
'''
TOP_LEVEL_TRY_IMPORT = '''
import os
try:
import bar
except ImportError:
raise ValueError()
'''
TRY_IMPORT_IN_FUNCTION = '''
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
'''
MULTIPLE_EXCEPTS_IMPORT = '''
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
'''
EXCEPT_AS_IMPORT = '''
import os
try:
import bar
except ImportError as e:
raise ValueError()
'''
GENERIC_EXCEPT_IMPORT = '''
import os
try:
import bar
except:
raise ValueError()
'''
MULTILINE_TRY_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
'''
MULTILINE_BOTH_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
'''
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
| 319 | 0 |
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)

            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 208 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
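# Minimal usage sketch (hedged: "google/owlvit-base-patch32" is a public checkpoint,
# used here only as an example). Nested text (one query list per image) is padded with
# " " so every image has the same number of queries before the per-image encodings are
# stacked:
#
#   from transformers import OwlViTProcessor
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   inputs = processor(text=[["a photo of a cat", "a photo of a dog"]],
#                      images=image, return_tensors="pt")
#   # keys: input_ids, attention_mask, pixel_values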
| 208 | 1 |
"""simple docstring"""
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
__SCREAMING_SNAKE_CASE ="\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
__SCREAMING_SNAKE_CASE ="\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n"
__SCREAMING_SNAKE_CASE ="\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=[\"About 95 species are currently accepted .\"]\n >>> predictions=[\"About 95 you now get in .\"]\n >>> references=[[\"About 95 species are currently known .\"]]\n >>> wiki_split = datasets.load_metric(\"wiki_split\")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n"
def normalize_answer(text):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(text))))


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_em(predictions, references):
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)
    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []

    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    # Normalization is required for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space to split the
    # sentence. Even though the Wiki-Auto and TURK datasets do not require
    # normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WikiSplit(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=[
                "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
                "https://github.com/cocoxu/simplification/blob/master/SARI.py",
                "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
                "https://github.com/mjpost/sacreBLEU",
            ],
            reference_urls=[
                "https://www.aclweb.org/anthology/Q16-1029.pdf",
                "https://github.com/mjpost/sacreBLEU",
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
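
# Usage sketch: the helpers above can also be called directly, without going
# through `datasets.load_metric`; the sentences are the ones from the
# docstring example at the top of the file:
#
#   sources = ["About 95 species are currently accepted ."]
#   predictions = ["About 95 you now get in ."]
#   references = [["About 95 species are currently known ."]]
#   print(compute_sari(sources, predictions, references))  # SARI on a 0-100 scale
#   print(compute_em(predictions, references))             # exact-match percentage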
| 213 |
"""simple docstring"""
__all__ = [
    "DownloadConfig",
    "DownloadManager",
    "DownloadMode",
    "StreamingDownloadManager",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 213 | 1 |
'''simple docstring'''
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first `n` natural numbers."""
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(f"{solution() = }")
| 332 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
logger = logging.get_logger(__name__)


class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 332 | 1 |
def and_gate(input_1: int, input_2: int) -> int:
    """Return 1 only if both inputs are 1 (logical AND), else 0."""
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
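
    # Usage sketch: the full truth table can also be printed compactly with
    # itertools (nothing here is specific to this file):
    #
    #   from itertools import product
    #
    #   for a, b in product((0, 1), repeat=2):
    #       print(f"AND({a}, {b}) = {and_gate(a, b)}")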
| 101 |
"""simple docstring"""
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1) -> LambdaLR:
    """Create a schedule with a constant learning rate."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1) -> LambdaLR:
    """Constant learning rate preceded by a linear warmup."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1) -> LambdaLR:
    """Piecewise-constant schedule parsed from rules like "1:10,0.1:20,0.01:30,0.005"."""
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1) -> LambdaLR:
    """Linear warmup followed by a linear decay to 0."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
) -> LambdaLR:
    """Linear warmup followed by a cosine decay."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
) -> LambdaLR:
    """Linear warmup followed by a cosine schedule with hard restarts."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
) -> LambdaLR:
    """Linear warmup followed by a polynomial decay from the initial lr to `lr_end`."""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
) -> LambdaLR:
    """Unified helper that builds any of the schedules above from its name."""
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
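
# Usage sketch: a minimal example wiring `get_scheduler` to a toy optimizer;
# the model and the step counts are illustrative assumptions:
#
#   import torch
#
#   model = torch.nn.Linear(4, 4)
#   optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
#   lr_scheduler = get_scheduler(
#       "cosine", optimizer, num_warmup_steps=100, num_training_steps=1000
#   )
#   for _ in range(1000):
#       optimizer.step()
#       lr_scheduler.step()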
| 224 | 0 |
def is_isogram(string: str) -> bool:
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))


if __name__ == "__main__":
    input_str = input("Enter a string ").strip()
    isogram = is_isogram(input_str)
print(f'''{input_str} is {"an" if isogram else "not an"} isogram.''')
| 102 |
from __future__ import annotations
def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
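
# Usage sketch: the function solves the mass-action law n * p = n_i^2 for
# whichever quantity is passed as 0; the numbers are illustrative:
#
#   carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0)
#   # -> ('intrinsic_conc', 50.0)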
| 102 | 1 |
def remove_duplicates(key: str) -> str:
    """Keep only the first occurrence of each alphabetic character in the key."""
    key_no_dups = ""
    for ch in key:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    """Build a substitution alphabet that starts with the keyword."""
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    """Enciphers a message given a cipher map."""
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    """Deciphers a message given a cipher map."""
    # Reverse our cipher mappings
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    """Handles I/O for the interactive mode."""
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
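
# Usage sketch: a non-interactive round trip; the keyword is chosen for
# illustration and non-alphabetic characters pass through unchanged:
#
#   cipher_map = create_cipher_map("Goodbye!!")
#   assert decipher(encipher("Hello World!!", cipher_map), cipher_map) == "HELLO WORLD!!"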
| 39 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))
class MobileViTVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout_prob = ffn_dropout
        self.attn_dropout_prob = attn_dropout
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTVaConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout_prob,
            attn_dropout=self.attn_dropout_prob,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTVaModel,
            "image-classification": MobileViTVaForImageClassification,
            "image-segmentation": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTVaModelTester(self)
        self.config_tester = MobileViTVaConfigTester(self, config_class=MobileViTVaConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViTV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 198 | 0 |
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_pretokenized_inputs(self, *args, **kwargs):
        # Pretokenized inputs are not tested for byte-level BPE tokenizers
        # (mostly an issue of adding a space before the string).
        pass

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )
    def test_padding_if_pad_token_set_slow(self):
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])
    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))

    @slow
    def test_truncation(self):
        tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")

        text = "\nif len_a > len_b:\n    result = a\nelse:\n    result = b\n\n\n\n#"
        expected_truncated_text = "\nif len_a > len_b:\n    result = a\nelse:\n    result = b"

        input_ids = tokenizer.encode(text)
        truncation_pattern = ["^#", re.escape("<|endoftext|>"), "^'''", '^"""', "\n\n\n"]
        decoded_text = tokenizer.decode(input_ids, truncate_before_pattern=truncation_pattern)
        self.assertEqual(decoded_text, expected_truncated_text)

    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass
| 371 |
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_relpos_bias_lookup(params, i, prefix):
    """Returns the relative position bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]


def tax_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v
def tax_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]

    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo


def tax_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def convert_tax_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool, scalable_attention: bool = False):
    """Converts the parameters from a T5X-Flax checkpoint to PyTorch-compatible arrays."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = tax_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = tax_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = tax_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
                    old, i, "decoder"
                ).T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the model's parameters with the converted T5X parameters."""
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path)
    converted = convert_tax_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_tax_checkpoint_to_pytorch(
    tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False, scalable_attention: bool = False
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = MTaConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMTaEncoderModel(config)
    else:
        model = UMTaForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
parser.add_argument(
"""--scalable_attention""",
action="""store_true""",
help="""Whether the model uses scaled attention (umt5 model)""",
default=False,
)
    args = parser.parse_args()
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
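
# Usage sketch: a hedged example of invoking this converter from the command
# line; the script name and the paths are placeholders, not real files:
#
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output \
#       --scalable_attention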
| 297 | 0 |
'''simple docstring'''
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
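
# Worked example (hypothetical inputs): with area = 4 m^2 and distance = 5e-4 m,
#   F = (ħ * c * π² * A) / (240 * d⁴)
#     = (1.054571817e-34 * 3e8 * π² * 4) / (240 * (5e-4) ** 4) ≈ 8.3e-14 N,
# so casimir_force(force=0, area=4, distance=5e-4) returns {'force': ~8.3e-14}.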
| 151 |
'''simple docstring'''
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Return the number of times `term` appears in `document`."""
    document_without_punctuation = document.translate(
        str.maketrans('', '', string.punctuation)).replace('\n', '')
    tokenize_document = document_without_punctuation.split(' ')  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (number of documents containing `term`, total number of documents)."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans('', '', string.punctuation))  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split('\n')
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    """Return idf = log10(n / df), optionally with add-one smoothing."""
    if smoothing:
        if n == 0:
            raise ValueError('log10(0) is undefined.')
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError('df must be > 0')
    elif n == 0:
        raise ValueError('log10(0) is undefined.')
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: float) -> float:
    return round(tf * idf, 3)
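
# A short usage sketch: in the two-document corpus "the cat sat\nthe dog ran",
# the term "cat" appears in 1 of 2 documents, so:
#   term_frequency('cat', 'the cat sat')                  -> 1
#   document_frequency('cat', 'the cat sat\nthe dog ran') -> (1, 2)
#   inverse_document_frequency(1, 2)                      -> 0.301  (= log10(2/1))
#   tf_idf(1, 0.301)                                      -> 0.301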
| 151 | 1 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class A ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCAmelCase_ = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCamelCase )
lowerCAmelCase_ = -1
lowerCAmelCase_ = ids_tensor((1, 5), vocab_size=model.config.vocab_size ).to(__lowerCamelCase )
lowerCAmelCase_ = model.generate(__lowerCamelCase, max_new_tokens=10, do_sample=__lowerCamelCase )
lowerCAmelCase_ = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
lowerCAmelCase_ = TextStreamer(__lowerCamelCase )
model.generate(__lowerCamelCase, max_new_tokens=10, do_sample=__lowerCamelCase, streamer=__lowerCamelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCAmelCase_ = cs.out[:-1]
self.assertEqual(__lowerCamelCase, __lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCAmelCase_ = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCamelCase )
lowerCAmelCase_ = -1
lowerCAmelCase_ = ids_tensor((1, 5), vocab_size=model.config.vocab_size ).to(__lowerCamelCase )
lowerCAmelCase_ = model.generate(__lowerCamelCase, max_new_tokens=10, do_sample=__lowerCamelCase )
lowerCAmelCase_ = tokenizer.decode(greedy_ids[0] )
lowerCAmelCase_ = TextIteratorStreamer(__lowerCamelCase )
lowerCAmelCase_ = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
lowerCAmelCase_ = Thread(target=model.generate, kwargs=__lowerCamelCase )
thread.start()
lowerCAmelCase_ = """"""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(__lowerCamelCase, __lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCAmelCase_ = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCamelCase )
lowerCAmelCase_ = -1
lowerCAmelCase_ = ids_tensor((1, 5), vocab_size=model.config.vocab_size ).to(__lowerCamelCase )
lowerCAmelCase_ = model.generate(__lowerCamelCase, max_new_tokens=10, do_sample=__lowerCamelCase )
lowerCAmelCase_ = greedy_ids[:, input_ids.shape[1] :]
lowerCAmelCase_ = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
lowerCAmelCase_ = TextStreamer(__lowerCamelCase, skip_prompt=__lowerCamelCase )
model.generate(__lowerCamelCase, max_new_tokens=10, do_sample=__lowerCamelCase, streamer=__lowerCamelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCAmelCase_ = cs.out[:-1]
self.assertEqual(__lowerCamelCase, __lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = AutoTokenizer.from_pretrained('''distilgpt2''' )
lowerCAmelCase_ = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(__lowerCamelCase )
lowerCAmelCase_ = -1
lowerCAmelCase_ = torch.ones((1, 5), device=__lowerCamelCase ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
lowerCAmelCase_ = TextStreamer(__lowerCamelCase, skip_special_tokens=__lowerCamelCase )
model.generate(__lowerCamelCase, max_new_tokens=1, do_sample=__lowerCamelCase, streamer=__lowerCamelCase )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
lowerCAmelCase_ = cs.out[:-1] # Remove the final "\n"
lowerCAmelCase_ = tokenizer(__lowerCamelCase, return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1) )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCAmelCase_ = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCamelCase )
lowerCAmelCase_ = -1
lowerCAmelCase_ = ids_tensor((1, 5), vocab_size=model.config.vocab_size ).to(__lowerCamelCase )
lowerCAmelCase_ = TextIteratorStreamer(__lowerCamelCase, timeout=0.001 )
lowerCAmelCase_ = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
lowerCAmelCase_ = Thread(target=model.generate, kwargs=__lowerCamelCase )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(__lowerCamelCase ):
lowerCAmelCase_ = """"""
for new_text in streamer:
streamer_text += new_text
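
# The pattern these tests exercise, as a standalone sketch: generation runs on a
# background thread while the main thread consumes decoded text from the streamer.
#
#   tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
#   model = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2')
#   inputs = tokenizer('Hello', return_tensors='pt')
#   streamer = TextIteratorStreamer(tokenizer)
#   Thread(target=model.generate, kwargs={**inputs, 'max_new_tokens': 10, 'streamer': streamer}).start()
#   text = ''.join(streamer)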
| 368 |
def hexagonal_numbers(length: int) -> list[int]:
    if length <= 0 or not isinstance(length, int):
        raise ValueError('''Length must be a positive integer.''')
    return [n * (2 * n - 1) for n in range(length)]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
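
# The n-th hexagonal number is h_n = n * (2n - 1); with range(length) starting at
# n = 0, hexagonal_numbers(5) yields [0, 1, 6, 15, 28].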
| 167 | 0 |
'''simple docstring'''
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = 'src/transformers'
PATH_TO_TASK_GUIDES = 'docs/source/en/tasks'
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Return the text between `start_prompt` and `end_prompt`, plus its indices and all lines."""
    with open(filename, 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return ''.join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def get_model_list_for_task(task_guide):
    """Return the list of models supporting `task_guide`, formatted as doc links."""
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ', '.join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + '\n'
def check_model_list_for_task(task_guide, overwrite=False):
    """Check (and optionally fix) the auto-generated model list of a task guide."""
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide), start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->', end_prompt='<!--End of the generated tip-->', )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), 'w', encoding='utf-8', newline='\n') as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f'The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`'
                ' to fix this.')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
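
# For illustration: get_model_list_for_task renders a comma-separated Markdown
# list such as "[BART](../model_doc/bart), [T5](../model_doc/t5)", and
# check_model_list_for_task splices it between the two HTML comment markers in the
# task guide, so `make fix-copies` keeps the docs in sync with the model mappings.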
| 297 |
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # reduce TensorFlow logging noise
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
print('''Torch version:''', None)
try:
import transformers
print('''transformers version:''', transformers.__version__)
except ImportError:
print('''transformers version:''', None)
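
# The same guarded-import pattern extends to other optional dependencies; a sketch:
#
#   try:
#       import datasets
#       print('datasets version:', datasets.__version__)
#   except ImportError:
#       print('datasets version:', None)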
| 53 | 0 |
"""simple docstring"""
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Perform a linear scan of `array` between indices `left` and `right`."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search on a sorted `array`; returns -1 if not found."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search on a sorted `array`; returns -1 if not found."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), F"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result2 != -1:
        print(F"Iterative search: {target} found at positions: {result1}")
        print(F"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
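
# Both variants narrow the range by thirds, so the search takes O(log3 n)
# comparisons until the window is smaller than `precision`, where it falls back to
# the linear scan; e.g. ite_ternary_search([1, 3, 5, 7, 9], 7) -> 3, and a missing
# target returns -1.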
| 313 |
"""simple docstring"""
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any]=1_3 , lowerCAmelCase_ : Optional[Any]=7 , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : int=True , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : Tuple=9_9 , lowerCAmelCase_ : List[str]=6_4 , lowerCAmelCase_ : Optional[int]=3_2 , lowerCAmelCase_ : Optional[Any]=5 , lowerCAmelCase_ : Optional[int]=4 , lowerCAmelCase_ : str=3_7 , lowerCAmelCase_ : Any="gelu" , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : Optional[int]=0.1 , lowerCAmelCase_ : int=5_1_2 , lowerCAmelCase_ : Optional[int]=1_6 , lowerCAmelCase_ : List[str]=2 , lowerCAmelCase_ : List[Any]=0.02 , lowerCAmelCase_ : Optional[int]=3 , lowerCAmelCase_ : Optional[Any]=4 , lowerCAmelCase_ : Dict=None , ):
"""simple docstring"""
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = seq_length
lowercase_ = is_training
lowercase_ = use_input_mask
lowercase_ = use_token_type_ids
lowercase_ = use_labels
lowercase_ = vocab_size
lowercase_ = hidden_size
lowercase_ = embedding_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = intermediate_size
lowercase_ = hidden_act
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = max_position_embeddings
lowercase_ = type_vocab_size
lowercase_ = type_sequence_label_size
lowercase_ = initializer_range
lowercase_ = num_labels
lowercase_ = num_choices
lowercase_ = scope
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
lowercase_ = None
if self.use_input_mask:
lowercase_ = random_attention_mask([self.batch_size, self.seq_length])
lowercase_ = None
if self.use_token_type_ids:
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
lowercase_ = None
lowercase_ = None
lowercase_ = None
if self.use_labels:
lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
lowercase_ = ids_tensor([self.batch_size] , self.num_choices)
lowercase_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , )
def _UpperCAmelCase ( self : Dict , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict):
"""simple docstring"""
lowercase_ = MegatronBertModel(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
lowercase_ = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_)
lowercase_ = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_)
lowercase_ = model(lowerCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str]):
"""simple docstring"""
lowercase_ = MegatronBertForMaskedLM(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
lowercase_ = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _UpperCAmelCase ( self : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int]):
"""simple docstring"""
lowercase_ = MegatronBertForCausalLM(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
lowercase_ = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any]):
"""simple docstring"""
lowercase_ = MegatronBertForNextSentencePrediction(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
lowercase_ = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2))
def _UpperCAmelCase ( self : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any]):
"""simple docstring"""
lowercase_ = MegatronBertForPreTraining(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
lowercase_ = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , next_sentence_label=lowerCAmelCase_ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2))
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple):
"""simple docstring"""
lowercase_ = MegatronBertForQuestionAnswering(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
lowercase_ = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _UpperCAmelCase ( self : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : int , lowerCAmelCase_ : List[str]):
"""simple docstring"""
lowercase_ = self.num_labels
lowercase_ = MegatronBertForSequenceClassification(lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
lowercase_ = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _UpperCAmelCase ( self : List[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict):
"""simple docstring"""
lowercase_ = self.num_labels
lowercase_ = MegatronBertForTokenClassification(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
lowercase_ = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _UpperCAmelCase ( self : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any]):
"""simple docstring"""
lowercase_ = self.num_choices
lowercase_ = MegatronBertForMultipleChoice(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
lowercase_ = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
lowercase_ = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
lowercase_ = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
lowercase_ = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
lowercase_ = self.prepare_config_and_inputs()
(
(
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) ,
) = config_and_inputs
lowercase_ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
lowercase__ = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
lowercase__ = (
{
"feature-extraction": MegatronBertModel,
"fill-mask": MegatronBertForMaskedLM,
"question-answering": MegatronBertForQuestionAnswering,
"text-classification": MegatronBertForSequenceClassification,
"text-generation": MegatronBertForCausalLM,
"token-classification": MegatronBertForTokenClassification,
"zero-shot": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase__ = True
# test_resize_embeddings = False
lowercase__ = False
def _UpperCAmelCase ( self : Any , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any]=False):
"""simple docstring"""
lowercase_ = super()._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_)
if return_labels:
if model_class in get_values(lowerCAmelCase_):
lowercase_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=lowerCAmelCase_)
lowercase_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_)
return inputs_dict
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ = MegatronBertModelTester(self)
lowercase_ = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=3_7)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*lowerCAmelCase_)
def _UpperCAmelCase ( self : str):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*lowerCAmelCase_)
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*lowerCAmelCase_)
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*lowerCAmelCase_)
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*lowerCAmelCase_)
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*lowerCAmelCase_)
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*lowerCAmelCase_)
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*lowerCAmelCase_)
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> int:
'''simple docstring'''
return torch.tensor(
__lowerCAmelCase , dtype=torch.long , device=__lowerCAmelCase , )
TOLERANCE = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
@unittest.skip("""Model is not available.""")
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
lowercase_ = """nvidia/megatron-bert-uncased-345m"""
if "MYDIR" in os.environ:
lowercase_ = os.path.join(os.environ["""MYDIR"""] , lowerCAmelCase_)
lowercase_ = MegatronBertModel.from_pretrained(lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.half()
lowercase_ = _long_tensor([[1_0_1, 7_1_1_0, 1_0_0_5, 1_0_5_6, 2_0_2_3, 1_1_3_3_3, 1_7_4_1_3, 1_0_2_9, 1_0_2]])
with torch.no_grad():
lowercase_ = model(lowerCAmelCase_)[0]
lowercase_ = torch.Size((1, 9, 1_0_2_4))
self.assertEqual(output.shape , lowerCAmelCase_)
lowercase_ = [-0.6_040, -0.2_517, -0.1_025, 0.3_420, -0.6_758, -0.0_017, -0.1_089, -0.1_990, 0.5_728]
for ii in range(3):
for jj in range(3):
lowercase_ = output[0, ii, jj]
lowercase_ = expected[3 * ii + jj]
lowercase_ = """ii={} jj={} a={} b={}""".format(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
self.assertTrue(math.isclose(lowerCAmelCase_ , lowerCAmelCase_ , rel_tol=lowerCAmelCase_ , abs_tol=lowerCAmelCase_) , msg=lowerCAmelCase_)
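
# Note on the tolerance check above: each element of the 3x3 output slice is
# compared element-wise with math.isclose against hard-coded reference values; an
# equivalent vectorized sketch would be
#   torch.allclose(output[0, :3, :3].flatten().float(), torch.tensor(expected), atol=1e-4)
# where `expected` stands for the reference list (name hypothetical here).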
| 313 | 1 |
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)


@require_sqlalchemy
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)


def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row


@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
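
# Reader/writer round-trip in one sketch (hypothetical file names):
#
#   ds = SqlDatasetReader('dataset', 'sqlite:///in.db').read()
#   SqlDatasetWriter(ds, 'dataset', 'sqlite:///out.db', num_proc=1).write()
#
# num_proc must be a positive integer; num_proc=0 raises ValueError, which is what
# the last test asserts.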
| 48 |
def naive_pattern_search(s: str, pattern: str) -> list:
    """Return the list of indices where `pattern` occurs in `s`."""
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
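
# Every alignment is checked character by character, so the worst case is
# O(len(s) * len(pattern)); e.g. the call above finds 'ABC' at [4, 10, 18].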
| 48 | 1 |
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    # Compute the flattening of the WGS84 ellipsoid and the reduced latitudes.
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
if __name__ == "__main__":
import doctest
doctest.testmod()
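
# Example: San Francisco (37.774856, -122.424227) to Yosemite (37.864742, -119.537521)
# gives haversine_distance(37.774856, -122.424227, 37.864742, -119.537521) ≈ 254,352 m.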
| 365 |
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int) -> None:
    """Write the first `n` lines of each file in `src_dir` to `dest_dir`."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open('w').write('\n'.join(new))
if __name__ == "__main__":
fire.Fire(minify)
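
# CLI usage sketch (hypothetical paths): `python minify.py ./src ./dest 100` keeps
# the first 100 lines of every file in ./src and writes them under ./dest.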
| 238 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : List[str] = logging.get_logger(__name__)
lowercase__ : Optional[Any] = {
"facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}
class TimesformerConfig(PretrainedConfig):
    """Configuration class for the TimeSformer video model."""

    model_type = 'timesformer'

    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='gelu',
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type='divided_space_time',
        drop_path_rate=0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
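
# Instantiation sketch: the defaults above mirror the base TimeSformer variant, so
#
#   config = TimesformerConfig(num_frames=16, attention_type='divided_space_time')
#
# only overrides what differs; everything else (hidden_size=768, 12 layers/heads)
# comes from the defaults.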
| 328 |
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
A__ = TypeVar("""T""")
A__ = TypeVar("""U""")
class DoubleLinkedListNode(Generic[T, U]):
    """Double linked list node built specifically for the LRU cache."""

    def __init__(self, key: T | None, val: U | None):
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self):
        return (
            f'Node: key: {self.key}, val: {self.val}, '
            f'has next: {bool(self.next)}, has prev: {bool(self.prev)}'
        )


class DoubleLinkedList(Generic[T, U]):
    """Double linked list built specifically for the LRU cache."""

    def __init__(self):
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self):
        rep = ['DoubleLinkedList']
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ',\n    '.join(rep)

    def add(self, node):
        """Adds the given node to the end of the list (before rear)."""
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(self, node):
        """Removes and returns the given node; returns None if it is not linked."""
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node


class LRUCache(Generic[T, U]):
    """LRU cache that stores up to a given capacity of key/value pairs."""

    # class variable mapping decorated functions to their cache instance
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity):
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self):
        return (
            f'CacheInfo(hits={self.hits}, misses={self.miss}, '
            f'capacity={self.capacity}, current size={self.num_keys})'
        )

    def __contains__(self, key):
        return key in self.cache

    def get(self, key):
        """Returns the value for `key` and bumps its node to most-recently-used."""
        if key in self.cache:
            self.hits += 1
            value_node = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key, value):
        """Sets the value for `key`, evicting the oldest entry when over capacity."""
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(cls, size: int = 128):
        """Decorator version of the LRU cache."""

        def cache_decorator_inner(func):
            def cache_decorator_wrapper(*args):
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)
                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, 'cache_info', cache_info)  # noqa: B010
            return cache_decorator_wrapper

        return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
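
# Usage sketch for the decorator defined above:
#
#   @LRUCache.decorator(100)
#   def fib(num: int) -> int:
#       return num if num < 2 else fib(num - 1) + fib(num - 2)
#
#   fib(30)
#   print(fib.cache_info())  # CacheInfo(hits=..., misses=..., capacity=100, current size=...)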
| 82 | 0 |
'''simple docstring'''
from manim import *
class UpperCAmelCase ( snake_case_ ):
def lowercase__ ( self : Union[str, Any] ) -> str:
_lowerCAmelCase = Rectangle(height=0.5 , width=0.5 )
_lowerCAmelCase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_lowerCAmelCase = [mem.copy() for i in range(6 )]
_lowerCAmelCase = [mem.copy() for i in range(6 )]
_lowerCAmelCase = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
_lowerCAmelCase = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
_lowerCAmelCase = VGroup(__snake_case , __snake_case ).arrange(__snake_case , buff=0 )
_lowerCAmelCase = Text("""CPU""" , font_size=24 )
_lowerCAmelCase = Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__snake_case )
_lowerCAmelCase = [mem.copy() for i in range(4 )]
_lowerCAmelCase = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
_lowerCAmelCase = Text("""GPU""" , font_size=24 )
_lowerCAmelCase = Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case )
gpu.move_to([-1, -1, 0] )
self.add(__snake_case )
_lowerCAmelCase = [mem.copy() for i in range(6 )]
_lowerCAmelCase = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
_lowerCAmelCase = Text("""Model""" , font_size=24 )
_lowerCAmelCase = Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case )
model.move_to([3, -1.0, 0] )
self.add(__snake_case )
_lowerCAmelCase = []
for i, rect in enumerate(__snake_case ):
rect.set_stroke(__snake_case )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
_lowerCAmelCase = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__snake_case , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__snake_case )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=__snake_case , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=__snake_case , buff=0.0 )
self.add(__snake_case )
cpu_targs.append(__snake_case )
_lowerCAmelCase = [mem.copy() for i in range(6 )]
_lowerCAmelCase = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
_lowerCAmelCase = Text("""Loaded Checkpoint""" , font_size=24 )
_lowerCAmelCase = Group(__snake_case , __snake_case ).arrange(__snake_case , aligned_edge=__snake_case , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
_lowerCAmelCase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_lowerCAmelCase = MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__snake_case , __snake_case )
_lowerCAmelCase = MarkupText(
f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(__snake_case , DOWN * 2.4 , aligned_edge=key_text.get_left() )
_lowerCAmelCase = MarkupText(
f"Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__snake_case ) , Write(__snake_case ) )
self.play(Write(__snake_case , run_time=1 ) , Create(__snake_case , run_time=1 ) )
_lowerCAmelCase = []
_lowerCAmelCase = []
for i, rect in enumerate(__snake_case ):
_lowerCAmelCase = fill.copy().set_fill(__snake_case , opacity=0.7 )
target.move_to(__snake_case )
first_animations.append(GrowFromCenter(__snake_case , run_time=1 ) )
_lowerCAmelCase = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(__snake_case , run_time=1.5 ) )
self.play(*__snake_case )
self.play(*__snake_case )
self.wait()
| 220 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCAmelCase ( unittest.TestCase ):
def lowercase__ ( self : Optional[int] ) -> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowercase__ ( self : List[Any] ) -> List[str]:
_lowerCAmelCase = 1
_lowerCAmelCase = 3
_lowerCAmelCase = (32, 32)
_lowerCAmelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__snake_case )
return image
@property
def lowercase__ ( self : int ) -> Union[str, Any]:
torch.manual_seed(0 )
_lowerCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
return model
@property
def lowercase__ ( self : Optional[int] ) -> List[str]:
torch.manual_seed(0 )
_lowerCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def lowercase__ ( self : Dict ) -> Optional[Any]:
torch.manual_seed(0 )
_lowerCAmelCase = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_06 , )
return RobertaSeriesModelWithTransformation(__snake_case )
@property
def lowercase__ ( self : Union[str, Any] ) -> str:
def extract(*__snake_case : List[Any] , **__snake_case : Any ):
class UpperCAmelCase :
def __init__( self : Any ) -> Any:
_lowerCAmelCase = torch.ones([0] )
def lowercase__ ( self : Optional[Any] , __snake_case : Tuple ) -> Dict:
self.pixel_values.to(__snake_case )
return self
return Out()
return extract
def lowercase__ ( self : List[str] ) -> Optional[int]:
_lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase = self.dummy_cond_unet
_lowerCAmelCase = PNDMScheduler(skip_prk_steps=__snake_case )
_lowerCAmelCase = self.dummy_vae
_lowerCAmelCase = self.dummy_text_encoder
_lowerCAmelCase = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
_lowerCAmelCase = 77
_lowerCAmelCase = self.dummy_image.to(__snake_case )
_lowerCAmelCase = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
_lowerCAmelCase = AltDiffusionImgaImgPipeline(
unet=__snake_case , scheduler=__snake_case , vae=__snake_case , text_encoder=__snake_case , tokenizer=__snake_case , safety_checker=__snake_case , feature_extractor=self.dummy_extractor , )
_lowerCAmelCase = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__snake_case )
_lowerCAmelCase = alt_pipe.to(__snake_case )
alt_pipe.set_progress_bar_config(disable=__snake_case )
_lowerCAmelCase = """A painting of a squirrel eating a burger"""
_lowerCAmelCase = torch.Generator(device=__snake_case ).manual_seed(0 )
_lowerCAmelCase = alt_pipe(
[prompt] , generator=__snake_case , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=__snake_case , )
_lowerCAmelCase = output.images
_lowerCAmelCase = torch.Generator(device=__snake_case ).manual_seed(0 )
_lowerCAmelCase = alt_pipe(
[prompt] , generator=__snake_case , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=__snake_case , return_dict=__snake_case , )[0]
_lowerCAmelCase = image[0, -3:, -3:, -1]
_lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_lowerCAmelCase = np.array([0.44_27, 0.37_31, 0.42_49, 0.49_41, 0.45_46, 0.41_48, 0.41_93, 0.46_66, 0.44_99] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def lowercase__ ( self : Tuple ) -> str:
_lowerCAmelCase = self.dummy_cond_unet
_lowerCAmelCase = PNDMScheduler(skip_prk_steps=__snake_case )
_lowerCAmelCase = self.dummy_vae
_lowerCAmelCase = self.dummy_text_encoder
_lowerCAmelCase = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
_lowerCAmelCase = 77
_lowerCAmelCase = self.dummy_image.to(__snake_case )
# put models in fp16
_lowerCAmelCase = unet.half()
_lowerCAmelCase = vae.half()
_lowerCAmelCase = bert.half()
# make sure here that pndm scheduler skips prk
_lowerCAmelCase = AltDiffusionImgaImgPipeline(
unet=__snake_case , scheduler=__snake_case , vae=__snake_case , text_encoder=__snake_case , tokenizer=__snake_case , safety_checker=__snake_case , feature_extractor=self.dummy_extractor , )
_lowerCAmelCase = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__snake_case )
_lowerCAmelCase = alt_pipe.to(__snake_case )
alt_pipe.set_progress_bar_config(disable=__snake_case )
_lowerCAmelCase = """A painting of a squirrel eating a burger"""
_lowerCAmelCase = torch.manual_seed(0 )
_lowerCAmelCase = alt_pipe(
[prompt] , generator=__snake_case , num_inference_steps=2 , output_type="""np""" , image=__snake_case , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def lowercase__ ( self : Optional[Any] ) -> Optional[Any]:
_lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
# resize to resolution that is divisible by 8 but not 16 or 32
_lowerCAmelCase = init_image.resize((7_60, 5_04) )
_lowerCAmelCase = """BAAI/AltDiffusion"""
_lowerCAmelCase = AltDiffusionImgaImgPipeline.from_pretrained(
__snake_case , safety_checker=__snake_case , )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
pipe.enable_attention_slicing()
_lowerCAmelCase = """A fantasy landscape, trending on artstation"""
_lowerCAmelCase = torch.manual_seed(0 )
_lowerCAmelCase = pipe(
prompt=__snake_case , image=__snake_case , strength=0.75 , guidance_scale=7.5 , generator=__snake_case , output_type="""np""" , )
_lowerCAmelCase = output.images[0]
_lowerCAmelCase = image[2_55:2_58, 3_83:3_86, -1]
assert image.shape == (5_04, 7_60, 3)
_lowerCAmelCase = np.array([0.93_58, 0.93_97, 0.95_99, 0.99_01, 1.00_00, 1.00_00, 0.98_82, 1.00_00, 1.00_00] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
def lowercase__ ( self : int ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Dict ) -> Tuple:
_lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
_lowerCAmelCase = init_image.resize((7_68, 5_12) )
_lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy""" )
_lowerCAmelCase = """BAAI/AltDiffusion"""
_lowerCAmelCase = AltDiffusionImgaImgPipeline.from_pretrained(
__snake_case , safety_checker=__snake_case , )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
pipe.enable_attention_slicing()
_lowerCAmelCase = """A fantasy landscape, trending on artstation"""
_lowerCAmelCase = torch.manual_seed(0 )
_lowerCAmelCase = pipe(
prompt=__snake_case , image=__snake_case , strength=0.75 , guidance_scale=7.5 , generator=__snake_case , output_type="""np""" , )
_lowerCAmelCase = output.images[0]
assert image.shape == (5_12, 7_68, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1E-2
| 220 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_mobilevit_config(mobilevit_name):
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
return config
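# Usage sketch (illustrative, not part of the original script):
#     config = get_mobilevit_config("deeplabv3_mobilevit_s")
#     config.num_labels  # -> 21 (PASCAL VOC classes) per the deeplabv3_ branch above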
def rename_key(name: str, base_model: bool = False) -> str:
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")

    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")
    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")

    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
                if "expand_1x1" in name:
                    name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
                if "conv_3x3" in name:
                    name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
                if "reduce_1x1" in name:
                    name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")

    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")

    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")

    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name

    return name
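# Example of the mapping performed by rename_key, derived from the substitutions above:
#     rename_key("conv_1.block.conv.weight") -> "mobilevit.conv_stem.convolution.weight"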
def convert_state_dict(orig_state_dict, model, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            # the original checkpoint stores query/key/value as one fused matrix; split it
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict
def prepare_img():
    # standard COCO test image (two cats on a couch) used across the HF test suite
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the original model's weights to our MobileViT structure.
    """
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    if mobilevit_name.startswith("deeplabv3_"):
        assert logits.shape == (1, 21, 32, 32)

        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ]
            )
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
    else:
        assert logits.shape == (1, 1000)

        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")
if __name__ == "__main__":
A : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--mobilevit_name",
default="mobilevit_s",
type=str,
help=(
"Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\',"
" \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'."
),
)
parser.add_argument(
"--checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
A : int = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 184 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
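# Note (added): with the _LazyModule pattern above, importing this package is cheap;
# the torch-backed modeling file is only imported the first time an attribute such as
# TimesformerModel is actually accessed.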
| 5 | 0 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( snake_case : str )-> list:
'''simple docstring'''
return [
txt[:a] + txt[a].upper() + txt[a + 1 :]
for a in range(len(snake_case ) )
if txt[a].isalpha()
]
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 356 |
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.txt",
    "merges_file": "bpe.codes",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
    },
    "merges_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "vinai/phobert-base": 256,
    "vinai/phobert-large": 256,
}


def get_pairs(word):
    """
    Return set of symbol pairs in a word.

    Word is represented as a tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
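# Example (sketch): get_pairs(("l", "o", "w</w>")) == {("l", "o"), ("o", "w</w>")}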
class PhobertTokenizer(PreTrainedTokenizer):
    """
    Construct a PhoBERT tokenizer, using Byte-Pair Encoding.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.merges_file = merges_file

        self.encoder = {}
        self.encoder[self.bos_token] = 0
        self.encoder[self.pad_token] = 1
        self.encoder[self.eos_token] = 2
        self.encoder[self.unk_token] = 3

        self.add_from_file(vocab_file)

        self.decoder = {v: k for k, v in self.encoder.items()}

        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)

        return out_vocab_file, out_merge_file

    def add_from_file(self, f):
        """
        Load a pre-existing dictionary from a text file and add its symbols to this instance.
        """
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
            return

        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
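# Usage sketch (file paths are placeholders for a downloaded PhoBERT vocabulary):
#     tokenizer = PhobertTokenizer("vocab.txt", "bpe.codes")
#     tokenizer.tokenize("xin chào")  # -> a list of "@@"-marked BPE sub-word pieces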
| 298 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_conditional_detr": [
        "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ConditionalDetrConfig",
        "ConditionalDetrOnnxConfig",
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = ["""ConditionalDetrFeatureExtractor"""]
SCREAMING_SNAKE_CASE_ = ["""ConditionalDetrImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_conditional_detr"] = [
        "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConditionalDetrForObjectDetection",
        "ConditionalDetrForSegmentation",
        "ConditionalDetrModel",
        "ConditionalDetrPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 296 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    PriorTransformer,
    StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components

        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2, attention_head_dim=12, embedding_dim=embedder_projection_dim, num_layers=1,
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=5.0, beta_schedule="squaredcos_cap_v2",
        )

        # regular denoising components

        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turtle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle",
            prior_num_inference_steps=2,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 55 | 0 |
import heapq


def greedy_min_vertex_cover(graph: dict) -> set[int]:
    """
    Greedy APX-Algorithm for minimum vertex cover.

    >>> graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    >>> greedy_min_vertex_cover(graph)
    {0, 1, 2, 4}
    """
    queue: list[list] = []

    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
| 362 |
from __future__ import annotations


def fractional_knapsack(
    value: list[int], weight: list[int], capacity: int
) -> tuple[float, list[float]]:
    """
    >>> value = [1, 3, 5, 7, 9]
    >>> weight = [0.9, 0.7, 0.5, 0.3, 0.1]
    >>> fractional_knapsack(value, weight, 5)
    (25, [1, 1, 1, 1, 1])
    """
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    # take items in order of decreasing value/weight ratio
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
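# Design note (added): sorting by value/weight ratio makes the greedy choice provably
# optimal for the fractional problem; the same greedy strategy can be arbitrarily bad
# for 0/1 knapsack, which needs dynamic programming instead.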
| 141 | 0 |
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MobileBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )
    def create_and_check_mobilebert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileBertModel,
            "fill-mask": MobileBertForMaskedLM,
            "question-answering": MobileBertForQuestionAnswering,
            "text-classification": MobileBertForSequenceClassification,
            "token-classification": MobileBertForTokenClassification,
            "zero-shot": MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-3


@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ],
            device=torch_device,
        )

        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)

        self.assertTrue(lower_bound and upper_bound)
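# Worked example of the relative check above (illustrative numbers): with an expected
# value of 1.0e8 and an output of 1.00005e8, expected / output ~= 0.99995, which lies
# inside [1 - TOLERANCE, 1 + TOLERANCE] for TOLERANCE = 1e-3, whereas an absolute
# difference check would see an error of 5e3.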
| 61 |
def _print_dist(dist, v):
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    """
    :param graph: 2D array calculated from weight[edge[i, j]]
    :param v: number of vertices
    :return: the shortest-distance matrix between all vertex pairs
    """
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v


if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight

    floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
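# Complexity note (added): the triple loop makes Floyd-Warshall O(V^3) in time and
# O(V^2) in memory, independent of the number of edges E.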
| 344 | 0 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)


@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """
        This __init__ is there for legacy code. When removing deprecated args completely, the class can simply be
        deleted
        """
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )
    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
| 224 |
from graphs.minimum_spanning_tree_kruskal import kruskal


def test_kruskal_successful_result():
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    result = kruskal(num_nodes, edges)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    assert sorted(expected) == sorted(result)
| 224 | 1 |
class EditDistance:
    """
    Use :
    solver              = EditDistance()
    editDistanceResult  = solver.min_dist_top_down(firstString, secondString)
    """

    def __init__(self):
        self.word1 = ""
        self.word2 = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m: int, n: int) -> int:
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)

        return self.dp[m][n]

    def min_dist_top_down(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]

        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]

        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]
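# Worked example (added): min_dist_bottom_up("kitten", "sitting") == 3
# (substitute k->s, substitute e->i, append g) -- the classic Levenshtein distance.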
if __name__ == "__main__":
A : Any = EditDistance()
print("****************** Testing Edit Distance DP Algorithm ******************")
print()
A : List[Any] = input("Enter the first string: ").strip()
A : List[Any] = input("Enter the second string: ").strip()
print()
print(f'The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}')
print(f'The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}')
print()
print("*************** End of Testing Edit Distance DP Algorithm ***************")
| 184 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 184 | 1 |
"""simple docstring"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    # Split the dataset into features ("data") and target labels.
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    # Load Iris dataset
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(features, targets, test_size=0.25)

    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier, x_test, y_test, display_labels=names, cmap="Blues", normalize="true"
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
| 79 |
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( _lowercase : int = 50 ) ->int:
'''simple docstring'''
a : Any = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 79 | 1 |
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
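# Overview (added): the converter below walks every variable in a Mesh-TensorFlow
# GPTSAN checkpoint and re-maps it onto the Hugging Face PyTorch naming scheme
# ("model.blocks.<n>...."), transposing kernels and splitting fused qkv tensors
# along the way.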
def convert_tf_gptsan_to_pt(args):
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float16)
            if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
                # the checkpoint also stores Adam optimizer moments; skip them
                continue
            if key_name.startswith("pasts/"):
                if key_name.startswith("pasts/mlp"):
                    player = int(key_name[9])
                elif key_name.startswith("pasts/out"):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # enter to nn.Sequencial with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/moe"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/switch_gating/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/softmlp/kernel"):
                    name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
                    nlayer = key_name[-9:-7]
                    for i in range(16):
                        name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0]).copy()
                        )  # In Mesh-Tensorflow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/mlp"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/p1/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p1/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/ln"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.feed_forward.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.feed_forward.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/att"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/qkv/kernel"):
                    state = vnp.copy()  # Compute same dimension as Mesh-tensorflow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]]
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
SCREAMING_SNAKE_CASE : int = '''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player
SCREAMING_SNAKE_CASE : str = torch.tensor(_A )
SCREAMING_SNAKE_CASE : Union[str, Any] = '''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player
SCREAMING_SNAKE_CASE : int = torch.tensor(_A )
SCREAMING_SNAKE_CASE : Optional[int] = '''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player
SCREAMING_SNAKE_CASE : Dict = torch.tensor(_A )
elif key_name.endswith('''/o/kernel''' ):
SCREAMING_SNAKE_CASE : str = '''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player
SCREAMING_SNAKE_CASE : int = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
SCREAMING_SNAKE_CASE : Dict = torch.tensor(_A )
elif key_name.startswith('''model/an''' ):
SCREAMING_SNAKE_CASE : Union[str, Any] = int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
SCREAMING_SNAKE_CASE : List[Any] = '''model.blocks.%d.self_attn.norm.bias''' % player
SCREAMING_SNAKE_CASE : Tuple = vnp.copy() # same because it is one dimensional
SCREAMING_SNAKE_CASE : Tuple = torch.tensor(_A )
elif key_name.endswith('''/g''' ):
SCREAMING_SNAKE_CASE : int = '''model.blocks.%d.self_attn.norm.weight''' % player
SCREAMING_SNAKE_CASE : Optional[Any] = vnp.copy() # same because it is one dimensional
SCREAMING_SNAKE_CASE : int = torch.tensor(_A )
elif (
key_name.startswith('''model/wte''' )
or key_name.startswith('''model/wpe''' )
or key_name.startswith('''model/ete''' )
):
SCREAMING_SNAKE_CASE : List[Any] = {'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[
key_name[-3:]
]
SCREAMING_SNAKE_CASE : Dict = '''model.%s.weight''' % nlayer
SCREAMING_SNAKE_CASE : int = vnp.copy() # same in embedded
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(_A )
if key_name.startswith('''model/wte''' ):
SCREAMING_SNAKE_CASE : int = '''lm_head.weight'''
SCREAMING_SNAKE_CASE : Optional[Any] = vnp.copy() # same in embedded
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(_A )
elif key_name.startswith('''model/wob''' ):
SCREAMING_SNAKE_CASE : Tuple = '''final_logits_bias'''
SCREAMING_SNAKE_CASE : Any = vnp.copy() # same in embedded
SCREAMING_SNAKE_CASE : Any = state.reshape((1, -1) )
SCREAMING_SNAKE_CASE : int = torch.tensor(_A )
elif key_name == "model/dense/kernel":
SCREAMING_SNAKE_CASE : Tuple = '''model.last_project.weight'''
SCREAMING_SNAKE_CASE : str = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(_A )
elif key_name == "model/dense_1/bias":
SCREAMING_SNAKE_CASE : Optional[int] = '''model.last_project.bias'''
SCREAMING_SNAKE_CASE : int = vnp.copy() # same because it is one dimensional
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(_A )
    torch.save(torch_state_dict, args.output)
if __name__ == "__main__":
a__ : List[Any] = argparse.ArgumentParser(
description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''')
parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''')
a__ : str = parser.parse_args()
convert_tf_gptsan_to_pt(args)
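# Hedged usage sketch (added): how the converter is typically invoked and how the
# resulting checkpoint could be inspected. The paths are placeholders.
#
#   python convert_tf_gptsan_to_pt.py --tf_model_dir ./tf_checkpoint --output ./gptsan.pt
#
#   import torch
#   state_dict = torch.load("./gptsan.pt", map_location="cpu")
#   print(sorted(state_dict)[:5])  # e.g. "model.blocks.0..." parameter names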
| 313 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        """A small, randomly initialized UNet for fast tests."""
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images
        generator = torch.manual_seed(0)
        image_from_tuple = pndm(
            generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
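# Minimal inference sketch (added), mirroring the slow test above with the public
# diffusers API; the checkpoint id comes from that test.
#
#   from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
#   unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
#   pipe = PNDMPipeline(unet=unet, scheduler=PNDMScheduler()).to("cuda")
#   image = pipe(num_inference_steps=20, output_type="pil").images[0]
#   image.save("pndm_sample.png")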
| 272 | 0 |
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}


def decimal_to_any(num: int, base: int) -> str:
    """Convert a positive integer to its representation in the given base (2..36)."""
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")
    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])
    return new_value[::-1]
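# A few illustrative calls (added):
#
#   decimal_to_any(255, 16) -> "FF"
#   decimal_to_any(255, 2)  -> "11111111"
#   decimal_to_any(35, 36)  -> "Z"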
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
| 367 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class __magic_name__ :
'''simple docstring'''
def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=False , _a=True , _a=99 , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=3 , _a=4 , _a=None , ):
"""simple docstring"""
lowerCamelCase = parent
lowerCamelCase = batch_size
lowerCamelCase = seq_length
lowerCamelCase = is_training
lowerCamelCase = use_input_mask
lowerCamelCase = use_token_type_ids
lowerCamelCase = use_labels
lowerCamelCase = vocab_size
lowerCamelCase = hidden_size
lowerCamelCase = num_hidden_layers
lowerCamelCase = num_attention_heads
lowerCamelCase = intermediate_size
lowerCamelCase = hidden_act
lowerCamelCase = hidden_dropout_prob
lowerCamelCase = attention_probs_dropout_prob
lowerCamelCase = max_position_embeddings
lowerCamelCase = type_vocab_size
lowerCamelCase = type_sequence_label_size
lowerCamelCase = initializer_range
lowerCamelCase = num_labels
lowerCamelCase = num_choices
lowerCamelCase = scope
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase = None
if self.use_input_mask:
lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase = None
if self.use_token_type_ids:
lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase = None
lowerCamelCase = None
lowerCamelCase = None
if self.use_labels:
lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCAmelCase ( self ):
"""simple docstring"""
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , use_stable_embedding=_a , )
def _lowerCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a ):
"""simple docstring"""
lowerCamelCase = OpenLlamaModel(config=_a )
model.to(_a )
model.eval()
lowerCamelCase = model(_a , attention_mask=_a )
lowerCamelCase = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ):
"""simple docstring"""
lowerCamelCase = True
lowerCamelCase = OpenLlamaModel(_a )
model.to(_a )
model.eval()
lowerCamelCase = model(
_a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , )
lowerCamelCase = model(
_a , attention_mask=_a , encoder_hidden_states=_a , )
lowerCamelCase = model(_a , attention_mask=_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ):
"""simple docstring"""
lowerCamelCase = OpenLlamaForCausalLM(config=_a )
model.to(_a )
model.eval()
lowerCamelCase = model(_a , attention_mask=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ):
"""simple docstring"""
lowerCamelCase = True
lowerCamelCase = True
lowerCamelCase = OpenLlamaForCausalLM(config=_a )
model.to(_a )
model.eval()
# first forward pass
lowerCamelCase = model(
_a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , use_cache=_a , )
lowerCamelCase = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowerCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCamelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowerCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCamelCase = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCamelCase = model(
_a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , output_hidden_states=_a , )["""hidden_states"""][0]
lowerCamelCase = model(
_a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , past_key_values=_a , output_hidden_states=_a , )["""hidden_states"""][0]
# select random slice
lowerCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCamelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCamelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_a , _a , atol=1e-3 ) )
def _lowerCAmelCase ( self ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class __magic_name__ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
__UpperCamelCase = (OpenLlamaForCausalLM,) if is_torch_available() else ()
__UpperCamelCase = (
{
"feature-extraction": OpenLlamaModel,
"text-classification": OpenLlamaForSequenceClassification,
"text-generation": OpenLlamaForCausalLM,
"zero-shot": OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase = False
__UpperCamelCase = False
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = OpenLlamaModelTester(self )
lowerCamelCase = ConfigTester(self , config_class=_a , hidden_size=37 )
def _lowerCAmelCase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCamelCase = type
self.model_tester.create_and_check_model(*_a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase = 3
lowerCamelCase = input_dict["""input_ids"""]
lowerCamelCase = input_ids.ne(1 ).to(_a )
lowerCamelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCamelCase = OpenLlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
lowerCamelCase = model(_a , attention_mask=_a , labels=_a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase = 3
lowerCamelCase = """single_label_classification"""
lowerCamelCase = input_dict["""input_ids"""]
lowerCamelCase = input_ids.ne(1 ).to(_a )
lowerCamelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCamelCase = OpenLlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
lowerCamelCase = model(_a , attention_mask=_a , labels=_a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase = 3
lowerCamelCase = """multi_label_classification"""
lowerCamelCase = input_dict["""input_ids"""]
lowerCamelCase = input_ids.ne(1 ).to(_a )
lowerCamelCase = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowerCamelCase = OpenLlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
lowerCamelCase = model(_a , attention_mask=_a , labels=_a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" )
def _lowerCAmelCase ( self ):
"""simple docstring"""
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase = ids_tensor([1, 10] , config.vocab_size )
lowerCamelCase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowerCamelCase = OpenLlamaModel(_a )
original_model.to(_a )
original_model.eval()
lowerCamelCase = original_model(_a ).last_hidden_state
lowerCamelCase = original_model(_a ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowerCamelCase = {"""type""": scaling_type, """factor""": 10.0}
lowerCamelCase = OpenLlamaModel(_a )
scaled_model.to(_a )
scaled_model.eval()
lowerCamelCase = scaled_model(_a ).last_hidden_state
lowerCamelCase = scaled_model(_a ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_a , _a , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(_a , _a , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_a , _a , atol=1e-5 ) )
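# Hedged sketch (added): building a tiny randomly initialized Open-Llama model
# outside the test harness, reusing the tester's hyperparameters above.
#
#   import torch
#   from transformers import OpenLlamaConfig, OpenLlamaForCausalLM
#   config = OpenLlamaConfig(
#       vocab_size=99, hidden_size=32, num_hidden_layers=5,
#       num_attention_heads=4, intermediate_size=37,
#   )
#   model = OpenLlamaForCausalLM(config).eval()
#   logits = model(torch.randint(0, 99, (1, 7))).logits  # shape (1, 7, 99)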
| 168 | 0 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__: List[Any] = logging.get_logger(__name__)
A__: List[str] = {
'''nvidia/segformer-b0-finetuned-ade-512-512''': (
'''https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'''
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    """Configuration class to store the configuration of a SegFormer model."""

    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
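# Usage sketch (added): instantiating the config directly; the ONNX config above
# declares the expected model inputs. The num_labels value is illustrative.
#
#   config = SegformerConfig(num_labels=150)
#   onnx_config = SegformerOnnxConfig(config)
#   print(onnx_config.inputs)  # OrderedDict([('pixel_values', {0: 'batch', ...})])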
| 149 |
def actual_power(a: int, b: int):
    """Compute a**b for non-negative b by divide and conquer."""
    if b == 0:
        return 1
    half = actual_power(a, int(b / 2))  # compute the half power only once
    if (b % 2) == 0:
        return half * half
    return a * half * half


def power(a: int, b: int) -> float:
    """Compute a**b, supporting negative exponents."""
    if b < 0:
        return 1 / actual_power(a, -b)
    return actual_power(a, b)
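# Added sketch: an iterative form of the same divide-and-conquer idea (binary
# exponentiation), which avoids recursion entirely. The name `iterative_power` is ours.
def iterative_power(a: int, b: int) -> int:
    """Compute a**b for b >= 0 by repeated squaring."""
    result = 1
    while b > 0:
        if b & 1:  # lowest bit set: fold the current power of a into the result
            result *= a
        a *= a  # square the base
        b >>= 1
    return result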
if __name__ == "__main__":
print(power(-2, -3))
| 149 | 1 |
"""simple docstring"""
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case ( _UpperCamelCase , unittest.TestCase):
__UpperCamelCase = FunnelTokenizer
__UpperCamelCase = FunnelTokenizerFast
__UpperCamelCase = True
__UpperCamelCase = True
def a_ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
super().setUp()
_A = [
"<unk>",
"<cls>",
"<sep>",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
_A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def a_ ( self : Optional[int] , **a__ : List[Any] ) -> Optional[int]:
'''simple docstring'''
return FunnelTokenizer.from_pretrained(self.tmpdirname , **a__ )
def a_ ( self : Tuple , **a__ : Optional[int] ) -> List[Any]:
'''simple docstring'''
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **a__ )
def a_ ( self : Any , a__ : Optional[int] ) -> int:
'''simple docstring'''
_A = "UNwant\u00E9d,running"
_A = "unwanted, running"
return input_text, output_text
def a_ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
_A = self.tokenizer_class(self.vocab_file )
_A = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(a__ , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , [7, 4, 5, 10, 8, 9] )
def a_ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
_A = self.get_tokenizers(do_lower_case=a__ )
for tokenizer in tokenizers:
_A = tokenizer("UNwant\u00E9d,running" )
_A = len(inputs["input_ids"] ) - 1
self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len )
_A = tokenizer("UNwant\u00E9d,running" , "UNwant\u00E9d,running" )
self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len + [1] * sentence_len )
| 163 |
"""simple docstring"""
def climb_stairs(number_of_steps: int) -> int:
    """Count distinct ways to climb a staircase taking 1 or 2 steps at a time."""
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
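# Added note: climb_stairs(n) follows the Fibonacci recurrence, since the count
# splits on whether the first move takes 1 or 2 steps.
#
#   assert [climb_stairs(n) for n in range(1, 6)] == [1, 2, 3, 5, 8]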
if __name__ == "__main__":
import doctest
doctest.testmod()
| 163 | 1 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
__lowerCamelCase = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""
class UpperCAmelCase ( unittest.TestCase ,A_ ):
def _SCREAMING_SNAKE_CASE (self : Dict ) -> int:
'''simple docstring'''
snake_case : Optional[Any] = load_tool("text-question-answering" )
self.tool.setup()
snake_case : str = load_tool("text-question-answering" , remote=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> int:
'''simple docstring'''
snake_case : Tuple = self.tool(snake_case__ , "What did Hugging Face do in April 2021?" )
self.assertEqual(snake_case__ , "launched the BigScience Research Workshop" )
def _SCREAMING_SNAKE_CASE (self : Any ) -> Optional[int]:
'''simple docstring'''
snake_case : Optional[int] = self.remote_tool(snake_case__ , "What did Hugging Face do in April 2021?" )
self.assertEqual(snake_case__ , "launched the BigScience Research Workshop" )
def _SCREAMING_SNAKE_CASE (self : int ) -> int:
'''simple docstring'''
snake_case : Dict = self.tool(text=snake_case__ , question="What did Hugging Face do in April 2021?" )
self.assertEqual(snake_case__ , "launched the BigScience Research Workshop" )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> int:
'''simple docstring'''
snake_case : List[Any] = self.remote_tool(text=snake_case__ , question="What did Hugging Face do in April 2021?" )
self.assertEqual(snake_case__ , "launched the BigScience Research Workshop" )
| 59 |
def solution(limit: int = 1000000) -> int:
    """Sum Euler's totient over 2..limit, i.e. count reduced proper fractions (Project Euler 72)."""
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))
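# Added cross-check sketch: an exact integer totient sieve, avoiding the float
# arithmetic above; it yields the same sum. The name `totient_sum_exact` is ours.
def totient_sum_exact(limit: int) -> int:
    phi = list(range(limit + 1))
    for p in range(2, limit + 1):
        if phi[p] == p:  # p is prime, so scale every multiple by (1 - 1/p)
            for n in range(p, limit + 1, p):
                phi[n] -= phi[n] // p
    return sum(phi[2:])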
if __name__ == "__main__":
print(f"{solution() = }")
| 146 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__SCREAMING_SNAKE_CASE : List[Any] = {
'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'],
'tokenization_m2m_100': ['M2M100Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : int = [
'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST',
'M2M100ForConditionalGeneration',
'M2M100Model',
'M2M100PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
__SCREAMING_SNAKE_CASE : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
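# Translation sketch (added, assuming the public "facebook/m2m100_418M" checkpoint):
#
#   from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer
#   tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr")
#   model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
#   batch = tokenizer("La vie est belle.", return_tensors="pt")
#   generated = model.generate(**batch, forced_bos_token_id=tokenizer.get_lang_id("en"))
#   print(tokenizer.batch_decode(generated, skip_special_tokens=True))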
| 233 |
"""simple docstring"""
def heaps(arr: list) -> list:
    """Return all permutations of arr, generated with Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even: swap the i-th and last elements
                arr[k - 1], arr[i] = arr[i], arr[k - 1]
            else:  # k is odd: swap the first and last elements
                arr[k - 1], arr[0] = arr[0], arr[k - 1]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res
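# Quick sanity check (added): Heap's algorithm yields exactly n! permutations.
#
#   from math import factorial
#   assert len(heaps([1, 2, 3, 4])) == factorial(4)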
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : List[Any] = input('Enter numbers separated by a comma:\n').strip()
__SCREAMING_SNAKE_CASE : str = [int(item) for item in user_input.split(',')]
print(heaps(arr))
| 233 | 1 |
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
_a : List[str]= logging.get_logger(__name__)
# General docstring
_a : Optional[Any]= "ResNetConfig"
# Base docstring
_a : List[str]= "microsoft/resnet-50"
_a : Tuple= [1, 2_048, 7, 7]
# Image classification docstring
_a : Optional[Any]= "microsoft/resnet-50"
_a : int= "tiger cat"
_a : int= [
"microsoft/resnet-50",
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class UpperCamelCase ( nn.Module ):
def __init__(self : Optional[Any] , _A : int , _A : int , _A : int = 3 , _A : int = 1 , _A : str = "relu") -> List[str]:
super().__init__()
__snake_case : Optional[int] = nn.Convad(
_A , _A , kernel_size=_A , stride=_A , padding=kernel_size // 2 , bias=_A)
__snake_case : Optional[int] = nn.BatchNormad(_A)
__snake_case : Optional[Any] = ACTaFN[activation] if activation is not None else nn.Identity()
def _lowercase (self : List[str] , _A : Tensor) -> Tensor:
__snake_case : Optional[int] = self.convolution(_A)
__snake_case : List[str] = self.normalization(_A)
__snake_case : int = self.activation(_A)
return hidden_state
class UpperCamelCase ( nn.Module ):
def __init__(self : Any , _A : ResNetConfig) -> List[str]:
super().__init__()
__snake_case : int = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act)
__snake_case : List[str] = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1)
__snake_case : int = config.num_channels
def _lowercase (self : int , _A : Tensor) -> Tensor:
__snake_case : Union[str, Any] = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.')
__snake_case : Optional[int] = self.embedder(_A)
__snake_case : List[Any] = self.pooler(_A)
return embedding
class UpperCamelCase ( nn.Module ):
def __init__(self : Dict , _A : int , _A : int , _A : int = 2) -> Tuple:
super().__init__()
__snake_case : Tuple = nn.Convad(_A , _A , kernel_size=1 , stride=_A , bias=_A)
__snake_case : List[Any] = nn.BatchNormad(_A)
def _lowercase (self : int , _A : Tensor) -> Tensor:
__snake_case : Union[str, Any] = self.convolution(_A)
__snake_case : Any = self.normalization(_A)
return hidden_state
class UpperCamelCase ( nn.Module ):
def __init__(self : List[Any] , _A : int , _A : int , _A : int = 1 , _A : str = "relu") -> List[str]:
super().__init__()
__snake_case : Any = in_channels != out_channels or stride != 1
__snake_case : List[str] = (
ResNetShortCut(_A , _A , stride=_A) if should_apply_shortcut else nn.Identity()
)
__snake_case : Tuple = nn.Sequential(
ResNetConvLayer(_A , _A , stride=_A) , ResNetConvLayer(_A , _A , activation=_A) , )
__snake_case : List[Any] = ACTaFN[activation]
def _lowercase (self : List[Any] , _A : Tuple) -> List[str]:
__snake_case : Optional[Any] = hidden_state
__snake_case : Optional[int] = self.layer(_A)
__snake_case : Any = self.shortcut(_A)
hidden_state += residual
__snake_case : int = self.activation(_A)
return hidden_state
class UpperCamelCase ( nn.Module ):
def __init__(self : Optional[Any] , _A : int , _A : int , _A : int = 1 , _A : str = "relu" , _A : int = 4) -> Any:
super().__init__()
__snake_case : Tuple = in_channels != out_channels or stride != 1
__snake_case : Any = out_channels // reduction
__snake_case : Tuple = (
ResNetShortCut(_A , _A , stride=_A) if should_apply_shortcut else nn.Identity()
)
__snake_case : Optional[int] = nn.Sequential(
ResNetConvLayer(_A , _A , kernel_size=1) , ResNetConvLayer(_A , _A , stride=_A) , ResNetConvLayer(_A , _A , kernel_size=1 , activation=_A) , )
__snake_case : Dict = ACTaFN[activation]
def _lowercase (self : List[str] , _A : str) -> Dict:
__snake_case : Optional[int] = hidden_state
__snake_case : int = self.layer(_A)
__snake_case : Optional[int] = self.shortcut(_A)
hidden_state += residual
__snake_case : Dict = self.activation(_A)
return hidden_state
class UpperCamelCase ( nn.Module ):
def __init__(self : Dict , _A : ResNetConfig , _A : int , _A : int , _A : int = 2 , _A : int = 2 , ) -> Any:
super().__init__()
__snake_case : List[str] = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer
__snake_case : Tuple = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(_A , _A , stride=_A , activation=config.hidden_act) , *[layer(_A , _A , activation=config.hidden_act) for _ in range(depth - 1)] , )
def _lowercase (self : Union[str, Any] , _A : Tensor) -> Tensor:
__snake_case : Any = input
for layer in self.layers:
__snake_case : Union[str, Any] = layer(_A)
return hidden_state
class UpperCamelCase ( nn.Module ):
def __init__(self : str , _A : ResNetConfig) -> Optional[Any]:
super().__init__()
__snake_case : Tuple = nn.ModuleList([])
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
_A , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ))
__snake_case : Any = zip(config.hidden_sizes , config.hidden_sizes[1:])
for (in_channels, out_channels), depth in zip(_A , config.depths[1:]):
self.stages.append(ResNetStage(_A , _A , _A , depth=_A))
def _lowercase (self : str , _A : Tensor , _A : bool = False , _A : bool = True) -> BaseModelOutputWithNoAttention:
__snake_case : Dict = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
__snake_case : Dict = hidden_states + (hidden_state,)
__snake_case : List[Any] = stage_module(_A)
if output_hidden_states:
__snake_case : Any = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None)
return BaseModelOutputWithNoAttention(
last_hidden_state=_A , hidden_states=_A , )
class UpperCamelCase ( lowercase ):
UpperCAmelCase : str = ResNetConfig
UpperCAmelCase : Dict = """resnet"""
UpperCAmelCase : List[str] = """pixel_values"""
UpperCAmelCase : str = True
def _lowercase (self : Tuple , _A : Optional[int]) -> Union[str, Any]:
if isinstance(_A , nn.Convad):
nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu')
elif isinstance(_A , (nn.BatchNormad, nn.GroupNorm)):
nn.init.constant_(module.weight , 1)
nn.init.constant_(module.bias , 0)
def _lowercase (self : Dict , _A : List[str] , _A : Union[str, Any]=False) -> List[Any]:
if isinstance(_A , _A):
__snake_case : int = value
_a : Tuple= R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
_a : str= R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
"""The bare ResNet model outputting raw features without any specific head on top.""" , lowercase , )
class UpperCamelCase ( lowercase ):
def __init__(self : Dict , _A : List[Any]) -> List[str]:
super().__init__(_A)
__snake_case : Any = config
__snake_case : Optional[int] = ResNetEmbeddings(_A)
__snake_case : Any = ResNetEncoder(_A)
__snake_case : Optional[int] = nn.AdaptiveAvgPoolad((1, 1))
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_A)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_A , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _lowercase (self : str , _A : Tensor , _A : Optional[bool] = None , _A : Optional[bool] = None) -> BaseModelOutputWithPoolingAndNoAttention:
__snake_case : Tuple = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__snake_case : Dict = return_dict if return_dict is not None else self.config.use_return_dict
__snake_case : int = self.embedder(_A)
__snake_case : Optional[int] = self.encoder(
_A , output_hidden_states=_A , return_dict=_A)
__snake_case : Union[str, Any] = encoder_outputs[0]
__snake_case : Optional[Any] = self.pooler(_A)
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_A , pooler_output=_A , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"""
ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
""" , lowercase , )
class UpperCamelCase ( lowercase ):
def __init__(self : Dict , _A : str) -> Union[str, Any]:
super().__init__(_A)
__snake_case : Dict = config.num_labels
__snake_case : Optional[Any] = ResNetModel(_A)
# classification head
__snake_case : Tuple = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_A)
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _lowercase (self : List[str] , _A : Optional[torch.FloatTensor] = None , _A : Optional[torch.LongTensor] = None , _A : Optional[bool] = None , _A : Optional[bool] = None , ) -> ImageClassifierOutputWithNoAttention:
__snake_case : int = return_dict if return_dict is not None else self.config.use_return_dict
__snake_case : Dict = self.resnet(_A , output_hidden_states=_A , return_dict=_A)
__snake_case : Tuple = outputs.pooler_output if return_dict else outputs[1]
__snake_case : Optional[int] = self.classifier(_A)
__snake_case : str = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__snake_case : Any = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__snake_case : Union[str, Any] = 'single_label_classification'
else:
__snake_case : List[Any] = 'multi_label_classification'
if self.config.problem_type == "regression":
__snake_case : Tuple = MSELoss()
if self.num_labels == 1:
__snake_case : Any = loss_fct(logits.squeeze() , labels.squeeze())
else:
__snake_case : str = loss_fct(_A , _A)
elif self.config.problem_type == "single_label_classification":
__snake_case : Any = CrossEntropyLoss()
__snake_case : Tuple = loss_fct(logits.view(-1 , self.num_labels) , labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
__snake_case : Optional[Any] = BCEWithLogitsLoss()
__snake_case : Tuple = loss_fct(_A , _A)
if not return_dict:
__snake_case : Dict = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=_A , logits=_A , hidden_states=outputs.hidden_states)
@add_start_docstrings(
"""
ResNet backbone, to be used with frameworks like DETR and MaskFormer.
""" , lowercase , )
class UpperCamelCase ( lowercase , lowercase ):
def __init__(self : Dict , _A : List[str]) -> Dict:
super().__init__(_A)
super()._init_backbone(_A)
__snake_case : List[Any] = [config.embedding_size] + config.hidden_sizes
__snake_case : List[str] = ResNetEmbeddings(_A)
__snake_case : Optional[Any] = ResNetEncoder(_A)
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_A)
@replace_return_docstrings(output_type=_A , config_class=_CONFIG_FOR_DOC)
def _lowercase (self : Tuple , _A : Tensor , _A : Optional[bool] = None , _A : Optional[bool] = None) -> BackboneOutput:
__snake_case : str = return_dict if return_dict is not None else self.config.use_return_dict
__snake_case : Any = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__snake_case : List[Any] = self.embedder(_A)
__snake_case : Any = self.encoder(_A , output_hidden_states=_A , return_dict=_A)
__snake_case : Union[str, Any] = outputs.hidden_states
__snake_case : List[Any] = ()
for idx, stage in enumerate(self.stage_names):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
__snake_case : Union[str, Any] = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=_A , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=_A , )
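# Inference sketch (added, using the public transformers API rather than the
# internal classes above): classify a PIL image with a pretrained ResNet-50.
#
#   import torch
#   from transformers import AutoImageProcessor, ResNetForImageClassification
#   processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#   model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")
#   inputs = processor(images=image, return_tensors="pt")  # `image`: a PIL image
#   with torch.no_grad():
#       logits = model(**inputs).logits
#   print(model.config.id2label[logits.argmax(-1).item()])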
| 172 |
"""simple docstring"""
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
# TF training parameters
_a : Optional[int]= False
_a : int= False
def __UpperCAmelCase ( UpperCAmelCase_ : Namespace ) -> Optional[Any]:
'''simple docstring'''
return TrainCommand(UpperCAmelCase_ )
class UpperCamelCase ( lowercase ):
@staticmethod
def _lowercase (_A : ArgumentParser) -> Any:
__snake_case : Any = parser.add_parser('train' , help='CLI tool to train a model on a task.')
train_parser.add_argument(
'--train_data' , type=_A , required=_A , help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' , )
train_parser.add_argument(
'--column_label' , type=_A , default=0 , help='Column of the dataset csv file with example labels.')
train_parser.add_argument(
'--column_text' , type=_A , default=1 , help='Column of the dataset csv file with example texts.')
train_parser.add_argument(
'--column_id' , type=_A , default=2 , help='Column of the dataset csv file with example ids.')
train_parser.add_argument(
'--skip_first_row' , action='store_true' , help='Skip the first row of the csv file (headers).')
train_parser.add_argument('--validation_data' , type=_A , default='' , help='path to validation dataset.')
train_parser.add_argument(
'--validation_split' , type=_A , default=0.1 , help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' , )
train_parser.add_argument('--output' , type=_A , default='./' , help='path to saved the trained model.')
train_parser.add_argument(
'--task' , type=_A , default='text_classification' , help='Task to train the model on.')
train_parser.add_argument(
'--model' , type=_A , default='bert-base-uncased' , help='Model\'s name or path to stored model.')
train_parser.add_argument('--train_batch_size' , type=_A , default=32 , help='Batch size for training.')
train_parser.add_argument('--valid_batch_size' , type=_A , default=64 , help='Batch size for validation.')
train_parser.add_argument('--learning_rate' , type=_A , default=3E-5 , help='Learning rate.')
train_parser.add_argument('--adam_epsilon' , type=_A , default=1E-08 , help='Epsilon for Adam optimizer.')
train_parser.set_defaults(func=_A)
def __init__(self : int , _A : Namespace) -> Tuple:
__snake_case : Optional[int] = logging.get_logger('transformers-cli/training')
__snake_case : Optional[int] = 'tf' if is_tf_available() else 'torch'
os.makedirs(args.output , exist_ok=_A)
__snake_case : List[Any] = args.output
__snake_case : Any = args.column_label
__snake_case : str = args.column_text
__snake_case : Any = args.column_id
self.logger.info(f"Loading {args.task} pipeline for {args.model}")
if args.task == "text_classification":
__snake_case : List[str] = TextClassificationPipeline.from_pretrained(args.model)
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(f"Loading dataset from {args.train_data}")
__snake_case : List[Any] = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
__snake_case : List[str] = None
if args.validation_data:
self.logger.info(f"Loading validation dataset from {args.validation_data}")
__snake_case : Dict = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
__snake_case : List[str] = args.validation_split
__snake_case : str = args.train_batch_size
__snake_case : Any = args.valid_batch_size
__snake_case : Union[str, Any] = args.learning_rate
__snake_case : str = args.adam_epsilon
def _lowercase (self : List[str]) -> str:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def _lowercase (self : str) -> int:
raise NotImplementedError
def _lowercase (self : Union[str, Any]) -> Optional[Any]:
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output)
| 172 | 1 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class lowerCAmelCase ( __UpperCamelCase ):
UpperCAmelCase__ = """Speech2TextFeatureExtractor"""
UpperCAmelCase__ = """Speech2TextTokenizer"""
def __init__( self : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : List[Any] ) -> str:
super().__init__(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase__ : Tuple = self.feature_extractor
lowerCamelCase__ : str = False
def __call__( self : Tuple , *UpperCAmelCase : int , **UpperCAmelCase : Optional[Any] ) -> Tuple:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*UpperCAmelCase , **UpperCAmelCase )
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
lowerCamelCase__ : int = kwargs.pop('raw_speech' )
else:
lowerCamelCase__ : Any = kwargs.pop('audio' , UpperCAmelCase )
lowerCamelCase__ : int = kwargs.pop('sampling_rate' , UpperCAmelCase )
lowerCamelCase__ : List[str] = kwargs.pop('text' , UpperCAmelCase )
if len(UpperCAmelCase ) > 0:
lowerCamelCase__ : List[str] = args[0]
lowerCamelCase__ : str = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
lowerCamelCase__ : int = self.feature_extractor(UpperCAmelCase , *UpperCAmelCase , sampling_rate=UpperCAmelCase , **UpperCAmelCase )
if text is not None:
lowerCamelCase__ : List[str] = self.tokenizer(UpperCAmelCase , **UpperCAmelCase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
lowerCamelCase__ : str = encodings['input_ids']
return inputs
def A_ ( self : int , *UpperCAmelCase : List[str] , **UpperCAmelCase : Any ) -> Dict:
return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase )
def A_ ( self : Dict , *UpperCAmelCase : List[Any] , **UpperCAmelCase : List[str] ) -> List[Any]:
return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase )
@contextmanager
def A_ ( self : List[Any] ) -> int:
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your audio inputs, or in a separate call.' )
lowerCamelCase__ : Dict = True
lowerCamelCase__ : Dict = self.tokenizer
yield
lowerCamelCase__ : Union[str, Any] = self.feature_extractor
lowerCamelCase__ : List[str] = False
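# Usage sketch (added, assuming the public "facebook/s2t-small-librispeech-asr"
# checkpoint): the processor routes audio to the feature extractor and text to
# the tokenizer through a single __call__.
#
#   from transformers import Speech2TextProcessor
#   processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
#   inputs = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")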
| 45 |
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the decimal part of number, rounded to digit_amount digits (0 keeps full precision)."""
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 45 | 1 |
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class __lowerCAmelCase :
def __init__( self , _snake_case , _snake_case=sys.maxsize ):
"""simple docstring"""
_lowerCAmelCase = """bilinear"""
_lowerCAmelCase = max_size
_lowerCAmelCase = short_edge_length
def __call__( self , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = []
for img in imgs:
_lowerCAmelCase , _lowerCAmelCase = img.shape[:2]
# later: provide list and randomly choose index for resize
_lowerCAmelCase = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
_lowerCAmelCase = size * 1.0 / min(_snake_case , _snake_case )
if h < w:
_lowerCAmelCase , _lowerCAmelCase = size, scale * w
else:
_lowerCAmelCase , _lowerCAmelCase = scale * h, size
if max(_snake_case , _snake_case ) > self.max_size:
_lowerCAmelCase = self.max_size * 1.0 / max(_snake_case , _snake_case )
_lowerCAmelCase = newh * scale
_lowerCAmelCase = neww * scale
_lowerCAmelCase = int(neww + 0.5 )
_lowerCAmelCase = int(newh + 0.5 )
if img.dtype == np.uinta:
_lowerCAmelCase = Image.fromarray(_snake_case )
_lowerCAmelCase = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
_lowerCAmelCase = np.asarray(_snake_case )
else:
_lowerCAmelCase = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw
_lowerCAmelCase = nn.functional.interpolate(
_snake_case , (newh, neww) , mode=self.interp_method , align_corners=_snake_case ).squeeze(0 )
img_augs.append(_snake_case )
return img_augs
class Preprocess:
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im,
                [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]],
                value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]

        return torch.stack(images), torch.tensor(image_sizes)

    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size: Tuple[int, int]):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
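A minimal usage sketch for the box helpers above (the box tensor and image size are made-up values):

boxes = torch.tensor([[10.0, 20.0, 700.0, 900.0]])
scale_yx = torch.tensor([[0.5, 0.5]])  # per-image (scale_y, scale_x)
boxes = _scale_box(boxes, scale_yx)  # -> [[5., 10., 350., 450.]]
_clip_box(boxes, (384, 384))  # clamps in place to the image bounds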
| 82 |
def solution(length: int = 50) -> int:
    """Count the ways a row of `length` unit squares can be filled with single
    grey squares and coloured tiles of length two, three, or four."""
    ways_number = [1] * (length + 1)

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]

    return ways_number[length]
if __name__ == "__main__":
print(f"{solution() = }")
| 82 | 1 |
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

require_version("pytorch_lightning>=1.0.4")

MODEL_MODES = {
    "base": AutoModel,
    "sequence-classification": AutoModelForSequenceClassification,
    "question-answering": AutoModelForQuestionAnswering,
    "pretraining": AutoModelForPreTraining,
    "token-classification": AutoModelForTokenClassification,
    "language-modeling": AutoModelWithLMHead,
    "summarization": AutoModelForSeq2SeqLM,
    "translation": AutoModelForSeq2SeqLM,
}


# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    # "": get_constant_schedule,             # not supported for now
    # "": get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class BaseTransformer(pl.LightningModule):
    def __init__(
        self,
        hparams: argparse.Namespace,
        num_labels=None,
        mode="base",
        config=None,
        tokenizer=None,
        model=None,
        **config_kwargs,
    ):
        """Initialize a model, tokenizer and config."""
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading

        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
                **({"num_labels": num_labels} if num_labels is not None else {}),
                cache_dir=cache_dir,
                **config_kwargs,
            )
        else:
            self.config: PretrainedConfig = config

        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))

        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
                cache_dir=cache_dir,
            )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path,
                from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
                config=self.config,
                cache_dir=cache_dir,
            )
        else:
            self.model = model

    def load_hf_checkpoint(self, *args, **kwargs):
        self.model = self.model_type.from_pretrained(*args, **kwargs)

    def get_lr_scheduler(self):
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps()
        )
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler

    def configure_optimizers(self):
        """Prepare optimizer and schedule (linear warmup and decay)"""
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check this named paramters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False
            )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon
            )
        self.opt = optimizer

        scheduler = self.get_lr_scheduler()

        return [optimizer], [scheduler]

    def test_step(self, batch, batch_nb):
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        return self.validation_end(outputs)

    def total_steps(self) -> int:
        """The number of total training steps that will be run. Used for lr scheduler purposes."""
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs

    def setup(self, stage):
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False):
        raise NotImplementedError("You must implement this for your task")

    def train_dataloader(self):
        return self.train_loader

    def val_dataloader(self):
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)

    def _feature_file(self, mode):
        return os.path.join(
            self.hparams.data_dir,
            "cached_{}_{}_{}".format(
                mode,
                list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
                str(self.hparams.max_seq_length),
            ),
        )

    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        parser.add_argument(
            "--model_name_or_path",
            default=None,
            type=str,
            required=True,
            help="Path to pretrained model or model identifier from huggingface.co/models",
        )
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
        )
        parser.add_argument(
            "--tokenizer_name",
            default=None,
            type=str,
            help="Pretrained tokenizer name or path if not the same as model_name",
        )
        parser.add_argument(
            "--cache_dir",
            default=str(Path(root_dir).parent / "test_run" / "cache"),
            type=str,
            help="Where do you want to store the pre-trained models downloaded from huggingface.co",
        )
        parser.add_argument(
            "--encoder_layerdrop",
            type=float,
            help="Encoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--decoder_layerdrop",
            type=float,
            help="Decoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--dropout",
            type=float,
            help="Dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--attention_dropout",
            type=float,
            help="Attention dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument(
            "--lr_scheduler",
            default="linear",
            choices=arg_to_scheduler_choices,
            metavar=arg_to_scheduler_metavar,
            type=str,
            help="Learning rate scheduler",
        )
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)
        parser.add_argument("--adafactor", action="store_true")
class InitCallback(pl.Callback):
    def on_sanity_check_start(self, trainer, pl_module):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.


class CheckParamCallback(pl.Callback):
    # check whether new added model parameters are differentiable
    def on_after_backward(self, trainer, pl_module):
        # print(pl_module.model.rag)
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)


class LoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
    parser.add_argument(
        "--output_dir",
        default=str(Path(root_dir).parent / "test_run" / "model_checkpoints"),
        type=str,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O2",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps",
        dest="accumulate_grad_batches",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir",
        default=str(Path(root_dir).parent / "test_run" / "dummy-train-data"),
        type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )
def generic_train(
    model: BaseTransformer,
    args: argparse.Namespace,
    early_stopping_callback=None,
    logger=True,  # can pass WandbLogger() here
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}
    if args.fp16:
        train_params["precision"] = 16
    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"
    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)
    else:
        print("RAG modeling tests with new set functions successfully executed!")
    return trainer
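A minimal sketch of wiring the argument helpers above together (the model name and flags are illustrative, and whether add_model_specific_args returns the parser or mutates it in place is assumed here to be in-place mutation):

parser = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
BaseTransformer.add_model_specific_args(parser, os.getcwd())
args = parser.parse_args(["--model_name_or_path", "bert-base-cased", "--do_train"])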
| 82 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import Mask2FormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerModel
if is_vision_available():
    from transformers import Mask2FormerImageProcessor
if is_vision_available():
from PIL import Image
class Mask2FormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 8,
        max_size=32 * 8,
        num_labels=4,
        hidden_dim=64,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        config = Mask2FormerConfig(
            hidden_size=self.hidden_dim,
        )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)

    def create_and_check_mask2former_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = Mask2FormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)

        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.hidden_dim),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_mask2former_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = Mask2FormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

            comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class Mask2FormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (Mask2FormerModel, Mask2FormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": Mask2FormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_attention_outputs = False

    def setUp(self):
        self.model_tester = Mask2FormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Mask2FormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mask2former_model(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs, output_hidden_states=False)

    def test_mask2former_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mask2former_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="Mask2Former does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Mask2Former does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Mask2Former is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="Mask2Former does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = Mask2FormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        config = self.model_tester.get_config()

        model = Mask2FormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config).to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()

        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class Mask2FormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return Mask2FormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None

    def test_inference_no_head(self):
        model = Mask2FormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

    def test_inference_universal_segmentation_head(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)
        )
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_with_segmentation_maps_and_loss(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
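A condensed sketch of the integration flow exercised above (checkpoint download assumed; the shapes in the comment follow from the assertions in the tests):

name = "facebook/mask2former-swin-small-coco-instance"
model = Mask2FormerForUniversalSegmentation.from_pretrained(name).eval()
processor = Mask2FormerImageProcessor.from_pretrained(name)
inputs = processor(images=prepare_img(), return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
# masks_queries_logits: (1, num_queries, H // 4, W // 4); class_queries_logits: (1, num_queries, num_labels + 1)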
| 82 | 1 |
'''simple docstring'''
def average_absolute_deviation(nums: list[int]) -> float:
    """
    Return the average absolute deviation of a list of numbers.

    >>> average_absolute_deviation([4, 1, 3, 2])
    1.0
    >>> average_absolute_deviation([-20, 0, 30, 15])
    16.25
    """
    if not nums:  # Makes sure that the list is not empty
        raise ValueError('List is empty')
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 168 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt''',
    },
    '''tokenizer_file''': {
        '''unc-nlp/lxmert-base-uncased''': (
            '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json'''
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''unc-nlp/lxmert-base-uncased''': 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    '''unc-nlp/lxmert-base-uncased''': {'''do_lower_case''': True},
}
class LxmertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
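A quick usage sketch for the fast tokenizer above (network access to the unc-nlp/lxmert-base-uncased checkpoint assumed):

tokenizer = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
encoded = tokenizer("a picture of a cat")
# encoded["input_ids"] starts with the [CLS] id and ends with the [SEP] id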
| 254 | 0 |
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDistilBertModel,
            "fill-mask": TFDistilBertForMaskedLM,
            "question-answering": TFDistilBertForQuestionAnswering,
            "text-classification": TFDistilBertForSequenceClassification,
            "token-classification": TFDistilBertForTokenClassification,
            "zero-shot": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
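A compact version of the integration check above (checkpoint download assumed):

model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
output = model(tf.constant([[0, 1, 2, 3, 4, 5]]))[0]
print(output.shape)  # (1, 6, 768)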
| 196 |
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
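A brief end-to-end sketch of how this pipeline is typically instantiated (the checkpoint and image path are illustrative, and the scores shown are made up):

from transformers import pipeline

classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
preds = classifier("cat.png", candidate_labels=["cat", "dog"])
# e.g. [{"score": 0.99, "label": "cat"}, {"score": 0.01, "label": "dog"}]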
| 196 | 1 |