code | code_codestyle | style_context | style_context_codestyle | label
---|---|---|---|---
stringlengths 82-54.1k | int64 0-699 | stringlengths 111-35.6k | int64 0-699 | int64 0-1
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """Return the citation count text for a paper from a Google Scholar lookup page."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
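# Google Scholar often rate-limits or CAPTCHA-blocks scripted clients, in which
# case the "gs_ri" div is absent and `soup.find` returns None. A more defensive
# fetch is sketched below; the User-Agent string and timeout are illustrative
# assumptions, not part of the original script.
#
#   response = requests.get(base_url, params=params, headers={"User-Agent": "Mozilla/5.0"}, timeout=10)
#   response.raise_for_status()  # surface HTTP errors instead of parsing an error page
#   soup = BeautifulSoup(response.content, "html.parser")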
| 62 |
import warnings
from typing import List, Optional, Tuple, Union

import numpy as np
import PIL
import torch

from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def _preprocess_image(image: Union[List, PIL.Image.Image, torch.Tensor]):
    warnings.warn(
        "The preprocess method is deprecated and will be removed in a future version. Please"
        " use VaeImageProcessor.preprocess instead",
        FutureWarning,
    )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8

        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image


def _preprocess_mask(mask: Union[List, PIL.Image.Image, torch.Tensor]):
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]

    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert("L").resize((w, h), resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask


class RePaintPipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: RePaintScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image],
        mask_image: Union[torch.Tensor, PIL.Image.Image],
        num_inference_steps: int = 250,
        eta: float = 0.0,
        jump_length: int = 10,
        jump_n_sample: int = 10,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        original_image = image
        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)

        batch_size = original_image.shape[0]

        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta

        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
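# Minimal usage sketch for the pipeline above, following the RePaint docs
# (the checkpoint id and keyword names are taken from those docs and may differ
# in your diffusers version):
#
#   from diffusers import RePaintPipeline, RePaintScheduler
#
#   scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
#   pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)
#   output = pipe(image=original_image, mask_image=mask_image, num_inference_steps=250)
#   inpainted = output.images[0]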
| 24 | 0 |
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/beit-base-patch16-224-pt22k": (
        "https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}


class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
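# Quick sanity check for the config above via the public API (a sketch; builds a
# randomly initialized model, no weights are downloaded):
#
#   from transformers import BeitConfig, BeitModel
#
#   config = BeitConfig(image_size=224, patch_size=16)
#   model = BeitModel(config)
#   print(model.config.hidden_size)  # 768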
| 36 |
from collections import UserDict
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
    from ..tf_utils import stable_softmax

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
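# End-to-end usage sketch through the top-level `pipeline` factory (the CLIP
# checkpoint is the usual backbone for this task; the image path is a placeholder):
#
#   from transformers import pipeline
#
#   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   preds = classifier("photo.jpg", candidate_labels=["cat", "dog", "car"])
#   print(preds[0])  # best {"score": ..., "label": ...} pair, sorted descending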
| 24 | 0 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_bigcode import (
            GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTBigCodeForCausalLM,
            GPTBigCodeForSequenceClassification,
            GPTBigCodeForTokenClassification,
            GPTBigCodeModel,
            GPTBigCodePreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
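# With the lazy module in place, importing the package only materializes config
# symbols; torch-backed model classes are resolved on first attribute access.
# A small sketch (assumes a transformers build that ships GPT-BigCode):
#
#   from transformers import GPTBigCodeConfig
#
#   config = GPTBigCodeConfig()
#   print(config.model_type)  # "gpt_bigcode"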
| 24 | 0 |
from math import factorial


def solution(num: int = 100) -> int:
    """Return the sum of the digits in the number num! (Project Euler 20).

    >>> solution(10)  # 10! = 3628800 -> 3 + 6 + 2 + 8 + 8 + 0 + 0
    27
    >>> solution(100)
    648
    """
    return sum(map(int, str(factorial(num))))


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
| 458 |
def sum_of_digits(n: int) -> int:
    """Find the sum of digits of a number using a while loop."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Find the sum of digits of a number using recursion."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """Find the sum of digits of a number via its string form."""
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Benchmark the three implementations with three different-length int values."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
| 24 | 0 |
"""simple docstring"""
def _snake_case ( _snake_case : list[int] , _snake_case : list[int] ) -> None:
'''simple docstring'''
_A = len(_lowerCamelCase )
print('The following activities are selected:' )
# The first activity is always selected
_A = 0
print(_lowerCamelCase , end=',' )
# Consider rest of the activities
for j in range(_lowerCamelCase ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(_lowerCamelCase , end=',' )
_A = j
if __name__ == "__main__":
import doctest
doctest.testmod()
a = [1, 3, 0, 5, 8, 5]
a = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
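# Note: this greedy selection is only optimal when activities are sorted by
# finish time (as the sample arrays above are); unsorted input must be sorted
# on `finish` first. The selection pass itself is O(n) after sorting.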
| 7 |
def infix_2_postfix(infix):
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack

        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix):
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("

    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix


if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
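# Worked example: for the input a+b*c the reversed, bracket-swapped string is
# "c*b+a"; infix_2_postfix("c*b+a") produces "cb*a+", and reversing that gives
# the prefix form "+a*bc".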
| 24 | 0 |
from pickle import UnpicklingError

import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict

from ..utils import logging


logger = logging.get_logger(__name__)


def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    try:
        with open(model_file, "rb") as flax_state_f:
            flax_state = from_bytes(pt_model, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned."
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(pt_model, flax_state)


def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    pt_model.base_model_prefix = ""

    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")

        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            # conv kernel: Flax stores HWIO, PyTorch expects OIHW
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            # linear kernel: transpose (in, out) -> (out, in)
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            # layer-norm "scale" corresponds to PyTorch "weight"
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]

        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )

        flax_key = ".".join(flax_key_tuple_array)

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )

    return pt_model
| 120 |
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swin-tiny-patch4-window7-224": (
        "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}


class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
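# Usage sketch mirroring the BEiT config above (random initialization):
#
#   from transformers import SwinConfig, SwinModel
#
#   config = SwinConfig(embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24])
#   model = SwinModel(config)
#   print(config.hidden_size)  # 96 * 2**3 = 768, the channel dim after the last stage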
| 24 | 0 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"The `inpainting.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionInpaintPipeline` instead."
)
| 393 |
import re
import time
from typing import Optional

import IPython.display as disp

from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length


def format_time(t):
    "Format `t` (in seconds) to (h):mm:ss"
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"


def html_progress_bar(prefix, value, label, total, width=300):
    # docstyle-ignore
    return f"""
    <div>
      {prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
      {label}
    </div>
    """


def text_to_html_table(items):
    "Put the texts in `items` in an HTML table."
    html_code = """<table border="1" class="dataframe">\n"""
    html_code += """  <thead>\n <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f"      <th>{i}</th>\n"
    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt)
            html_code += f"      <td>{elt}</td>\n"
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code


class NotebookProgressBar:
    warmup = 5
    update_every = 0.2

    def __init__(
        self,
        total: int,
        prefix: Optional[str] = None,
        leave: bool = True,
        parent: Optional["NotebookTrainingTracker"] = None,
        width: int = 300,
    ):
        self.total = total
        self.prefix = "" if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None

    def update(self, value: int, force_update: bool = False, comment: str = None):
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value)
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value)
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)

    def update_bar(self, value, comment=None):
        spaced_value = " " * (len(str(self.total)) - len(str(value))) + str(value)
        if self.elapsed_time is None:
            self.label = f"[{spaced_value}/{self.total} : < :"
        elif self.predicted_remaining is None:
            self.label = f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)}"
        else:
            self.label = (
                f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <"
                f" {format_time(self.predicted_remaining)}"
            )
            self.label += f", {1/self.average_time_per_item:.2f} it/s"
        self.label += "]" if self.comment is None or len(self.comment) == 0 else f", {self.comment}]"
        self.display()

    def display(self):
        self.html_code = html_progress_bar(self.prefix, self.value, self.label, self.total, self.width)
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def close(self):
        "Closes the progress bar."
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML(""))


class NotebookTrainingTracker(NotebookProgressBar):
    def __init__(self, num_steps, column_names=None):
        super().__init__(num_steps)
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None

    def display(self):
        self.html_code = html_progress_bar(self.prefix, self.value, self.label, self.total, self.width)
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table)
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def write_line(self, values):
        if self.inner_table is None:
            self.inner_table = [list(values.keys()), list(values.values())]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key)
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns])

    def add_child(self, total, prefix=None, width=300):
        self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
        return self.child_bar

    def remove_child(self):
        self.child_bar = None
        self.display()


class NotebookProgressCallback(TrainerCallback):
    def __init__(self):
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False

    def on_train_begin(self, args, state, control, **kwargs):
        self.first_column = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step"
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ["Training Loss"]
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append("Validation Loss")
        self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names)

    def on_step_end(self, args, state, control, **kwargs):
        epoch = int(state.epoch) if int(state.epoch) == state.epoch else f"{state.epoch:.2f}"
        self.training_tracker.update(
            state.global_step + 1,
            comment=f"Epoch {epoch}/{state.num_train_epochs}",
            force_update=self._force_next_update,
        )
        self._force_next_update = False

    def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
        if not has_length(eval_dataloader):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader))
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader))
            self.prediction_bar.update(1)
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1)

    def on_predict(self, args, state, control, **kwargs):
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None

    def on_log(self, args, state, control, logs=None, **kwargs):
        # Only for when there is no evaluation
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {"Training Loss": logs["loss"]}
            # First column is necessarily Step since we're not in epoch eval strategy
            values["Step"] = state.global_step
            self.training_tracker.write_line(values)

    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        if self.training_tracker is not None:
            values = {"Training Loss": "No log", "Validation Loss": "No log"}
            for log in reversed(state.log_history):
                if "loss" in log:
                    values["Training Loss"] = log["loss"]
                    break

            if self.first_column == "Epoch":
                values["Epoch"] = int(state.epoch)
            else:
                values["Step"] = state.global_step
            metric_key_prefix = "eval"
            for k in metrics:
                if k.endswith("_loss"):
                    metric_key_prefix = re.sub(r"\_loss$", "", k)
            _ = metrics.pop("total_flos", None)
            _ = metrics.pop("epoch", None)
            _ = metrics.pop(f"{metric_key_prefix}_runtime", None)
            _ = metrics.pop(f"{metric_key_prefix}_samples_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_steps_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_jit_compilation_time", None)
            for k, v in metrics.items():
                if k == f"{metric_key_prefix}_loss":
                    values["Validation Loss"] = v
                else:
                    splits = k.split("_")
                    name = " ".join([part.capitalize() for part in splits[1:]])
                    values[name] = v
            self.training_tracker.write_line(values)
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True

    def on_train_end(self, args, state, control, **kwargs):
        self.training_tracker.update(
            state.global_step,
            comment=f"Epoch {int(state.epoch)}/{state.num_train_epochs}",
            force_update=True,
        )
        self.training_tracker = None
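# `Trainer` registers this callback automatically when it detects a notebook
# environment; forcing it on explicitly looks like this (a sketch; the model
# and dataset are placeholders):
#
#   from transformers import Trainer, TrainingArguments
#   from transformers.utils.notebook import NotebookProgressCallback
#
#   trainer = Trainer(model=model, args=TrainingArguments("out"), train_dataset=train_ds)
#   trainer.add_callback(NotebookProgressCallback)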
| 24 | 0 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'''microsoft/wavlm-base''': '''https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json''',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class __a ( __lowerCAmelCase ):
lowerCamelCase : int ='''wavlm'''
def __init__( self , UpperCAmelCase=32 , UpperCAmelCase=768 , UpperCAmelCase=12 , UpperCAmelCase=12 , UpperCAmelCase=3072 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.0 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.0_2 , UpperCAmelCase=1E-5 , UpperCAmelCase="group" , UpperCAmelCase="gelu" , UpperCAmelCase=(512, 512, 512, 512, 512, 512, 512) , UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2) , UpperCAmelCase=(10, 3, 3, 3, 3, 2, 2) , UpperCAmelCase=False , UpperCAmelCase=128 , UpperCAmelCase=16 , UpperCAmelCase=320 , UpperCAmelCase=800 , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=0.0_5 , UpperCAmelCase=10 , UpperCAmelCase=2 , UpperCAmelCase=0.0 , UpperCAmelCase=10 , UpperCAmelCase=320 , UpperCAmelCase=2 , UpperCAmelCase=0.1 , UpperCAmelCase=100 , UpperCAmelCase=256 , UpperCAmelCase=256 , UpperCAmelCase=0.1 , UpperCAmelCase="mean" , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=256 , UpperCAmelCase=(512, 512, 512, 512, 1500) , UpperCAmelCase=(5, 3, 3, 1, 1) , UpperCAmelCase=(1, 2, 3, 1, 1) , UpperCAmelCase=512 , UpperCAmelCase=80 , UpperCAmelCase=0 , UpperCAmelCase=1 , UpperCAmelCase=2 , UpperCAmelCase=False , UpperCAmelCase=3 , UpperCAmelCase=2 , UpperCAmelCase=3 , UpperCAmelCase=None , **UpperCAmelCase , ):
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE , pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE )
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = feat_extract_norm
lowerCAmelCase_ = feat_extract_activation
lowerCAmelCase_ = list(__SCREAMING_SNAKE_CASE )
lowerCAmelCase_ = list(__SCREAMING_SNAKE_CASE )
lowerCAmelCase_ = list(__SCREAMING_SNAKE_CASE )
lowerCAmelCase_ = conv_bias
lowerCAmelCase_ = num_buckets
lowerCAmelCase_ = max_bucket_distance
lowerCAmelCase_ = num_conv_pos_embeddings
lowerCAmelCase_ = num_conv_pos_embedding_groups
lowerCAmelCase_ = len(self.conv_dim )
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = intermediate_size
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = hidden_dropout
lowerCAmelCase_ = attention_dropout
lowerCAmelCase_ = activation_dropout
lowerCAmelCase_ = feat_proj_dropout
lowerCAmelCase_ = final_dropout
lowerCAmelCase_ = layerdrop
lowerCAmelCase_ = layer_norm_eps
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = num_ctc_classes
lowerCAmelCase_ = vocab_size
lowerCAmelCase_ = do_stable_layer_norm
lowerCAmelCase_ = use_weighted_layer_sum
lowerCAmelCase_ = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCAmelCase_ = apply_spec_augment
lowerCAmelCase_ = mask_time_prob
lowerCAmelCase_ = mask_time_length
lowerCAmelCase_ = mask_time_min_masks
lowerCAmelCase_ = mask_feature_prob
lowerCAmelCase_ = mask_feature_length
# parameters for pretraining with codevector quantized representations
lowerCAmelCase_ = num_codevectors_per_group
lowerCAmelCase_ = num_codevector_groups
lowerCAmelCase_ = contrastive_logits_temperature
lowerCAmelCase_ = num_negatives
lowerCAmelCase_ = codevector_dim
lowerCAmelCase_ = proj_codevector_dim
lowerCAmelCase_ = diversity_loss_weight
# ctc loss
lowerCAmelCase_ = ctc_loss_reduction
lowerCAmelCase_ = ctc_zero_infinity
# adapter
lowerCAmelCase_ = add_adapter
lowerCAmelCase_ = adapter_kernel_size
lowerCAmelCase_ = adapter_stride
lowerCAmelCase_ = num_adapter_layers
lowerCAmelCase_ = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
lowerCAmelCase_ = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
lowerCAmelCase_ = list(__SCREAMING_SNAKE_CASE )
lowerCAmelCase_ = list(__SCREAMING_SNAKE_CASE )
lowerCAmelCase_ = list(__SCREAMING_SNAKE_CASE )
lowerCAmelCase_ = xvector_output_dim
@property
def lowerCamelCase_ ( self ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 ) | 552 |
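# Usage sketch for the WavLM config above (random init, or load the reference
# checkpoint listed in the archive map):
#
#   from transformers import WavLMConfig, WavLMModel
#
#   model = WavLMModel(WavLMConfig())
#   # or: model = WavLMModel.from_pretrained("microsoft/wavlm-base")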
def least_divisible_repunit(divisor: int) -> int:
    """Return the least length k such that the repunit 111...1 of length k
    is divisible by divisor (0 if no repunit is, i.e. gcd(divisor, 10) != 1).

    >>> least_divisible_repunit(7)
    6
    >>> least_divisible_repunit(41)
    5
    """
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1000000) -> int:
    """Return the least odd divisor n, coprime to 10, for which
    least_divisible_repunit(n) first exceeds limit (Project Euler 129)."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor


if __name__ == "__main__":
    print(f"{solution() = }")
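# Sanity check: least_divisible_repunit(7) == 6 because 111111 = 7 * 15873 is
# the first repunit divisible by 7, and least_divisible_repunit(41) == 5 since
# 11111 = 41 * 271.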
| 24 | 0 |
import argparse

import torch

from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
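# Typical invocation (the script name matches the transformers conversion
# utility this file mirrors; all paths are placeholders for your own files):
#
#   python convert_bert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path bert_model.ckpt \
#       --bert_config_file bert_config.json \
#       --pytorch_dump_path pytorch_model.bin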
| 457 |
from unittest.mock import patch

import pyspark

from datasets.packaged_modules.spark.spark import (
    Spark,
    SparkExamplesIterable,
    _generate_iterable_examples,
)

from ..utils import (
    require_dill_gt_0_3_2,
    require_not_windows,
)


def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50


@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)

    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])
        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)

    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
| 24 | 0 |
'''simple docstring'''
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(""".""")
def _lowerCAmelCase (_lowercase ):
"""simple docstring"""
a__ = test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
"`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
F'{test_file} instead.' )
a__ = components[-1]
if not test_fn.endswith("py" ):
raise ValueError(F'`test_file` should be a python file. Got {test_fn} instead.' )
if not test_fn.startswith("test_modeling_" ):
raise ValueError(
F'`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.' )
a__ = components[:-1] + [test_fn.replace(".py" , "" )]
a__ = ".".join(_lowerCamelCase )
return test_module_path
def _lowerCAmelCase (_lowercase ):
"""simple docstring"""
a__ = get_module_path(_lowerCamelCase )
a__ = importlib.import_module(_lowerCamelCase )
return test_module
def _lowerCAmelCase (_lowercase ):
"""simple docstring"""
a__ = []
a__ = get_test_module(_lowerCamelCase )
for attr in dir(_lowerCamelCase ):
if attr.endswith("ModelTester" ):
tester_classes.append(getattr(_lowerCamelCase , _lowerCamelCase ) )
# sort with class names
return sorted(_lowerCamelCase , key=lambda _lowercase : x.__name__ )
def _lowerCAmelCase (_lowercase ):
"""simple docstring"""
a__ = []
a__ = get_test_module(_lowerCamelCase )
for attr in dir(_lowerCamelCase ):
a__ = getattr(_lowerCamelCase , _lowerCamelCase )
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
# `all_model_classes` is not empty (which also excludes other special classes).
a__ = getattr(_lowerCamelCase , "all_model_classes" , [] )
if len(_lowerCamelCase ) > 0:
test_classes.append(_lowerCamelCase )
# sort with class names
return sorted(_lowerCamelCase , key=lambda _lowercase : x.__name__ )
def _lowerCAmelCase (_lowercase ):
"""simple docstring"""
a__ = get_test_classes(_lowerCamelCase )
a__ = set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
# sort with class names
    return sorted(_lowerCamelCase , key=lambda x : x.__name__ )
def _lowerCAmelCase (_lowercase ):
"""simple docstring"""
a__ = test_class()
if hasattr(_lowerCamelCase , "setUp" ):
test.setUp()
a__ = None
if hasattr(_lowerCamelCase , "model_tester" ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
a__ = test.model_tester.__class__
return model_tester
def _lowerCAmelCase (_lowercase , _lowercase ):
"""simple docstring"""
a__ = get_test_classes(_lowerCamelCase )
a__ = []
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(_lowerCamelCase )
# sort with class names
    return sorted(_lowerCamelCase , key=lambda x : x.__name__ )
def _lowerCAmelCase (_lowercase , _lowercase ):
"""simple docstring"""
a__ = get_test_classes_for_model(_lowerCamelCase , _lowerCamelCase )
a__ = []
for test_class in test_classes:
a__ = get_model_tester_from_test_class(_lowerCamelCase )
if tester_class is not None:
tester_classes.append(_lowerCamelCase )
# sort with class names
    return sorted(_lowerCamelCase , key=lambda x : x.__name__ )
def _lowerCAmelCase (_lowercase ):
"""simple docstring"""
a__ = get_test_classes(_lowerCamelCase )
a__ = {test_class: get_model_tester_from_test_class(_lowerCamelCase ) for test_class in test_classes}
return test_tester_mapping
def _lowerCAmelCase (_lowercase ):
"""simple docstring"""
a__ = get_model_classes(_lowerCamelCase )
a__ = {
model_class: get_test_classes_for_model(_lowerCamelCase , _lowerCamelCase ) for model_class in model_classes
}
return model_test_mapping
def _lowerCAmelCase (_lowercase ):
"""simple docstring"""
a__ = get_model_classes(_lowerCamelCase )
a__ = {
model_class: get_tester_classes_for_model(_lowerCamelCase , _lowerCamelCase ) for model_class in model_classes
}
return model_to_tester_mapping
def _lowerCAmelCase (_lowercase ):
    """simple docstring"""
    if isinstance(_lowercase , str ):
        return _lowercase
    elif isinstance(_lowercase , type ):
        return _lowercase.__name__
    elif isinstance(_lowercase , (list, tuple) ):
        return [_lowerCAmelCase(x ) for x in _lowercase]
    elif isinstance(_lowercase , dict ):
        return {_lowerCAmelCase(k ): _lowerCAmelCase(v ) for k, v in _lowercase.items()}
    else:
        return _lowercase
| 331 |
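The identifiers in the block above are masked, so here is a hedged reconstruction of its core pattern with readable names (not the library's verbatim code): import a test module and keep the classes whose names end with "ModelTester".

import importlib

def get_tester_classes(module_path: str) -> list:
    # Import the test module and collect every class whose name ends with "ModelTester".
    test_module = importlib.import_module(module_path)
    tester_classes = [
        getattr(test_module, attr) for attr in dir(test_module) if attr.endswith("ModelTester")
    ]
    # Sort by class name, mirroring the `key=lambda x: x.__name__` calls above.
    return sorted(tester_classes, key=lambda x: x.__name__)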
'''simple docstring'''
def _UpperCamelCase (_lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int )-> float:
'''simple docstring'''
__snake_case = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
# formula for sum of series
return total
def _UpperCamelCase ()-> str:
'''simple docstring'''
print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 24 | 0 |
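For reference, the snippet above computes the arithmetic-series closed form S = n/2 * (2a + (n - 1)d); a quick worked check:

# Arithmetic series with first term a=1, common difference d=1, n=10 terms:
# S = 10/2 * (2*1 + 9*1) = 5 * 11 = 55, matching sum(range(1, 11)).
assert (10 / 2) * (2 * 1 + (10 - 1) * 1) == sum(range(1, 11)) == 55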
'''simple docstring'''
from __future__ import annotations
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> list[list[int]]:
UpperCAmelCase__ : Any = []
create_all_state(1 , _lowerCamelCase , _lowerCamelCase , [] , _lowerCamelCase )
return result
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ) -> None:
if level == 0:
total_list.append(current_list[:] )
return
for i in range(_lowerCamelCase , total_number - level + 2 ):
current_list.append(_lowerCamelCase )
create_all_state(i + 1 , _lowerCamelCase , level - 1 , _lowerCamelCase , _lowerCamelCase )
current_list.pop()
def a__ ( lowerCAmelCase__ ) -> None:
for i in total_list:
print(*_lowerCamelCase )
if __name__ == "__main__":
UpperCamelCase__ = 4
UpperCamelCase__ = 2
UpperCamelCase__ = generate_all_combinations(n, k)
print_all_state(total_list)
| 75 |
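The backtracking above enumerates the k-combinations of 1..n in lexicographic order, which is the same order the standard library produces:

from itertools import combinations

# For n=4, k=2 the expected enumeration is:
expected = [list(c) for c in combinations(range(1, 4 + 1), 2)]
assert expected == [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]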
'''simple docstring'''
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def _UpperCamelCase (_lowerCamelCase : List[Any] , _lowerCamelCase : Tuple=False )-> Union[str, Any]:
'''simple docstring'''
try:
__snake_case = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
__snake_case = default
else:
# KEY is set, convert it to True or False.
try:
__snake_case = strtobool(_lowerCamelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'''If set, {key} must be yes or no.''' )
return _value
UpperCAmelCase_ : Optional[Any] = parse_flag_from_env('''RUN_SLOW''', default=False)
UpperCAmelCase_ : Union[str, Any] = parse_flag_from_env('''RUN_REMOTE''', default=False)
UpperCAmelCase_ : Optional[Any] = parse_flag_from_env('''RUN_LOCAL''', default=True)
UpperCAmelCase_ : Union[str, Any] = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
UpperCAmelCase_ : Dict = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
UpperCAmelCase_ : int = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
UpperCAmelCase_ : Tuple = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
UpperCAmelCase_ : str = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
UpperCAmelCase_ : Tuple = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
UpperCAmelCase_ : Union[str, Any] = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('''0.3.2'''),
reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
UpperCAmelCase_ : int = pytest.mark.skipif(
sys.platform == '''win32''',
reason='''test should not be run on Windows''',
)
def _UpperCamelCase (_lowerCamelCase : List[str] )-> List[Any]:
'''simple docstring'''
try:
import faiss # noqa
except ImportError:
__snake_case = unittest.skip('''test requires faiss''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] )-> List[str]:
'''simple docstring'''
try:
import regex # noqa
except ImportError:
__snake_case = unittest.skip('''test requires regex''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] )-> Union[str, Any]:
'''simple docstring'''
try:
import elasticsearch # noqa
except ImportError:
__snake_case = unittest.skip('''test requires elasticsearch''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Dict )-> List[Any]:
'''simple docstring'''
try:
import sqlalchemy # noqa
except ImportError:
__snake_case = unittest.skip('''test requires sqlalchemy''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : List[str] )-> List[str]:
'''simple docstring'''
if not config.TORCH_AVAILABLE:
__snake_case = unittest.skip('''test requires PyTorch''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Optional[Any] )-> Dict:
'''simple docstring'''
if not config.TF_AVAILABLE:
__snake_case = unittest.skip('''test requires TensorFlow''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : int )-> Any:
'''simple docstring'''
if not config.JAX_AVAILABLE:
__snake_case = unittest.skip('''test requires JAX''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Tuple )-> str:
'''simple docstring'''
if not config.PIL_AVAILABLE:
__snake_case = unittest.skip('''test requires Pillow''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Dict )-> Any:
'''simple docstring'''
try:
import transformers # noqa F401
except ImportError:
return unittest.skip('''test requires transformers''' )(_lowerCamelCase )
else:
return test_case
def _UpperCamelCase (_lowerCamelCase : int )-> Tuple:
'''simple docstring'''
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip('''test requires tiktoken''' )(_lowerCamelCase )
else:
return test_case
def _UpperCamelCase (_lowerCamelCase : Dict )-> str:
'''simple docstring'''
try:
import spacy # noqa F401
except ImportError:
return unittest.skip('''test requires spacy''' )(_lowerCamelCase )
else:
return test_case
def _UpperCamelCase (_lowerCamelCase : int )-> Dict:
'''simple docstring'''
def _require_spacy_model(_lowerCamelCase : int ):
try:
import spacy # noqa F401
spacy.load(_lowerCamelCase )
except ImportError:
return unittest.skip('''test requires spacy''' )(_lowerCamelCase )
except OSError:
return unittest.skip('''test requires spacy model \'{}\''''.format(_lowerCamelCase ) )(_lowerCamelCase )
else:
return test_case
return _require_spacy_model
def _UpperCamelCase (_lowerCamelCase : str )-> Dict:
'''simple docstring'''
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip('''test requires pyspark''' )(_lowerCamelCase )
else:
return test_case
def _UpperCamelCase (_lowerCamelCase : Tuple )-> str:
'''simple docstring'''
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip('''test requires joblibspark''' )(_lowerCamelCase )
else:
return test_case
def _UpperCamelCase (_lowerCamelCase : Any )-> int:
'''simple docstring'''
if not _run_slow_tests or _run_slow_tests == 0:
__snake_case = unittest.skip('''test is slow''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Any )-> Optional[Any]:
'''simple docstring'''
if not _run_local_tests or _run_local_tests == 0:
__snake_case = unittest.skip('''test is local''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : str )-> int:
'''simple docstring'''
if not _run_packaged_tests or _run_packaged_tests == 0:
__snake_case = unittest.skip('''test is packaged''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Optional[int] )-> str:
'''simple docstring'''
if not _run_remote_tests or _run_remote_tests == 0:
__snake_case = unittest.skip('''test requires remote''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (*_lowerCamelCase : str )-> Optional[int]:
'''simple docstring'''
def decorate(cls : Optional[Any] ):
for name, fn in cls.__dict__.items():
if callable(_lowerCamelCase ) and name.startswith('''test''' ):
for decorator in decorators:
__snake_case = decorator(_lowerCamelCase )
setattr(cls , _lowerCamelCase , _lowerCamelCase )
return cls
return decorate
class lowerCAmelCase ( __lowerCAmelCase):
pass
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : List[str] = 0
__lowercase : Dict = 1
__lowercase : List[Any] = 2
@contextmanager
def _UpperCamelCase (_lowerCamelCase : Dict=OfflineSimulationMode.CONNECTION_FAILS , _lowerCamelCase : Optional[int]=1E-16 )-> Tuple:
'''simple docstring'''
__snake_case = requests.Session().request
def timeout_request(_lowerCamelCase : Any , _lowerCamelCase : str , _lowerCamelCase : str , **_lowerCamelCase : Any ):
# Change the url to an invalid url so that the connection hangs
__snake_case = '''https://10.255.255.1'''
if kwargs.get('''timeout''' ) is None:
raise RequestWouldHangIndefinitelyError(
f'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.''' )
__snake_case = timeout
try:
return online_request(_lowerCamelCase , _lowerCamelCase , **_lowerCamelCase )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
__snake_case = url
__snake_case = e.args[0]
__snake_case = (max_retry_error.args[0].replace('''10.255.255.1''' , f'''OfflineMock[{url}]''' ),)
__snake_case = (max_retry_error,)
raise
def raise_connection_error(_lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[int] , **_lowerCamelCase : Dict ):
raise requests.ConnectionError('''Offline mode is enabled.''' , request=_lowerCamelCase )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch('''requests.Session.send''' , _lowerCamelCase ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch('''requests.Session.request''' , _lowerCamelCase ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowerCamelCase ):
yield
else:
raise ValueError('''Please use a value from the OfflineSimulationMode enum.''' )
@contextmanager
def _UpperCamelCase (*_lowerCamelCase : Union[str, Any] , **_lowerCamelCase : List[str] )-> Any:
'''simple docstring'''
__snake_case = str(Path().resolve() )
with tempfile.TemporaryDirectory(*_lowerCamelCase , **_lowerCamelCase ) as tmp_dir:
try:
os.chdir(_lowerCamelCase )
yield
finally:
os.chdir(_lowerCamelCase )
@contextmanager
def _UpperCamelCase ()-> Optional[int]:
'''simple docstring'''
import gc
gc.collect()
__snake_case = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def _UpperCamelCase ()-> List[Any]:
'''simple docstring'''
import gc
gc.collect()
__snake_case = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def _UpperCamelCase (_lowerCamelCase : Any , _lowerCamelCase : int )-> Any:
'''simple docstring'''
return deepcopy(_lowerCamelCase ).integers(0 , 1_00 , 10 ).tolist() == deepcopy(_lowerCamelCase ).integers(0 , 1_00 , 10 ).tolist()
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] )-> List[Any]:
'''simple docstring'''
import decorator
from requests.exceptions import HTTPError
def _wrapper(_lowerCamelCase : int , *_lowerCamelCase : int , **_lowerCamelCase : Optional[int] ):
try:
return func(*_lowerCamelCase , **_lowerCamelCase )
except HTTPError as err:
if str(_lowerCamelCase ).startswith('''500''' ) or str(_lowerCamelCase ).startswith('''502''' ):
pytest.xfail(str(_lowerCamelCase ) )
raise err
return decorator.decorator(_wrapper , _lowerCamelCase )
class lowerCAmelCase :
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
__snake_case = returncode
__snake_case = stdout
__snake_case = stderr
async def _UpperCamelCase (_lowerCamelCase : List[str] , _lowerCamelCase : Union[str, Any] )-> Dict:
'''simple docstring'''
while True:
__snake_case = await stream.readline()
if line:
callback(_lowerCamelCase )
else:
break
async def _UpperCamelCase (_lowerCamelCase : Tuple , _lowerCamelCase : Optional[int]=None , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : Tuple=None , _lowerCamelCase : Dict=False , _lowerCamelCase : List[Any]=False )-> _RunOutput:
'''simple docstring'''
if echo:
print('''\nRunning: ''' , ''' '''.join(_lowerCamelCase ) )
__snake_case = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=_lowerCamelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_lowerCamelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
__snake_case = []
__snake_case = []
def tee(_lowerCamelCase : int , _lowerCamelCase : List[Any] , _lowerCamelCase : str , _lowerCamelCase : Dict="" ):
__snake_case = line.decode('''utf-8''' ).rstrip()
sink.append(_lowerCamelCase )
if not quiet:
print(_lowerCamelCase , _lowerCamelCase , file=_lowerCamelCase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda _lowerCamelCase : tee(_lowerCamelCase , _lowerCamelCase , sys.stdout , label='''stdout:''' ) ),
_read_stream(p.stderr , lambda _lowerCamelCase : tee(_lowerCamelCase , _lowerCamelCase , sys.stderr , label='''stderr:''' ) ),
] , timeout=_lowerCamelCase , )
return _RunOutput(await p.wait() , _lowerCamelCase , _lowerCamelCase )
def _UpperCamelCase (_lowerCamelCase : Optional[Any] , _lowerCamelCase : Any=None , _lowerCamelCase : List[str]=None , _lowerCamelCase : Optional[Any]=1_80 , _lowerCamelCase : Dict=False , _lowerCamelCase : int=True )-> _RunOutput:
'''simple docstring'''
__snake_case = asyncio.get_event_loop()
__snake_case = loop.run_until_complete(
_stream_subprocess(_lowerCamelCase , env=_lowerCamelCase , stdin=_lowerCamelCase , timeout=_lowerCamelCase , quiet=_lowerCamelCase , echo=_lowerCamelCase ) )
__snake_case = ''' '''.join(_lowerCamelCase )
if result.returncode > 0:
__snake_case = '''\n'''.join(result.stderr )
raise RuntimeError(
f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
f'''The combined stderr from workers follows:\n{stderr}''' )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f'''\'{cmd_str}\' produced no output.''' )
return result
def _UpperCamelCase ()-> Dict:
'''simple docstring'''
__snake_case = os.environ.get('''PYTEST_XDIST_WORKER''' , '''gw0''' )
__snake_case = re.sub(R'''^gw''' , '''''' , _lowerCamelCase , 0 , re.M )
return int(_lowerCamelCase )
def _UpperCamelCase ()-> Tuple:
'''simple docstring'''
__snake_case = 2_95_00
__snake_case = pytest_xdist_worker_id()
return port + uniq_delta
| 24 | 0 |
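The last two helpers above derive a per-worker port from the PYTEST_XDIST_WORKER id ('gw0', 'gw1', ...) so that parallel test workers never collide; the same arithmetic in one self-contained sketch (the helper name is assumed):

import os
import re

def worker_port(base_port: int = 29500) -> int:
    # "gw3" -> 3; defaults to worker 0 when not running under pytest-xdist.
    worker_id = re.sub(r"^gw", "", os.environ.get("PYTEST_XDIST_WORKER", "gw0"))
    return base_port + int(worker_id)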
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
return x + 2
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _A ( self : List[str] ):
SCREAMING_SNAKE_CASE : Dict = "x = 3"
SCREAMING_SNAKE_CASE : int = {}
SCREAMING_SNAKE_CASE : str = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE )
assert result == 3
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"x": 3} )
SCREAMING_SNAKE_CASE : Tuple = "x = y"
SCREAMING_SNAKE_CASE : Dict = {"y": 5}
SCREAMING_SNAKE_CASE : List[Any] = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"x": 5, "y": 5} )
def _A ( self : Optional[int] ):
SCREAMING_SNAKE_CASE : Optional[int] = "y = add_two(x)"
SCREAMING_SNAKE_CASE : List[str] = {"x": 3}
SCREAMING_SNAKE_CASE : Any = evaluate(__SCREAMING_SNAKE_CASE , {"add_two": add_two} , state=__SCREAMING_SNAKE_CASE )
assert result == 5
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"x": 3, "y": 5} )
# Won't work without the tool
with CaptureStdout() as out:
SCREAMING_SNAKE_CASE : Dict = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE )
assert result is None
assert "tried to execute add_two" in out.out
def _A ( self : Optional[int] ):
SCREAMING_SNAKE_CASE : Tuple = "x = 3"
SCREAMING_SNAKE_CASE : Optional[Any] = {}
SCREAMING_SNAKE_CASE : Tuple = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE )
assert result == 3
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"x": 3} )
def _A ( self : List[str] ):
SCREAMING_SNAKE_CASE : Any = "test_dict = {\'x\': x, \'y\': add_two(x)}"
SCREAMING_SNAKE_CASE : Optional[Any] = {"x": 3}
SCREAMING_SNAKE_CASE : Union[str, Any] = evaluate(__SCREAMING_SNAKE_CASE , {"add_two": add_two} , state=__SCREAMING_SNAKE_CASE )
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"x": 3, "y": 5} )
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"x": 3, "test_dict": {"x": 3, "y": 5}} )
def _A ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE : Tuple = "x = 3\ny = 5"
SCREAMING_SNAKE_CASE : int = {}
SCREAMING_SNAKE_CASE : int = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"x": 3, "y": 5} )
def _A ( self : List[Any] ):
SCREAMING_SNAKE_CASE : Optional[Any] = "text = f\'This is x: {x}.\'"
SCREAMING_SNAKE_CASE : Optional[int] = {"x": 3}
SCREAMING_SNAKE_CASE : Union[str, Any] = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"x": 3, "text": "This is x: 3."} )
def _A ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE : Tuple = "if x <= 3:\n y = 2\nelse:\n y = 5"
SCREAMING_SNAKE_CASE : List[str] = {"x": 3}
SCREAMING_SNAKE_CASE : List[str] = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"x": 3, "y": 2} )
SCREAMING_SNAKE_CASE : Optional[int] = {"x": 8}
SCREAMING_SNAKE_CASE : Tuple = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"x": 8, "y": 5} )
def _A ( self : int ):
SCREAMING_SNAKE_CASE : Any = "test_list = [x, add_two(x)]"
SCREAMING_SNAKE_CASE : str = {"x": 3}
SCREAMING_SNAKE_CASE : Tuple = evaluate(__SCREAMING_SNAKE_CASE , {"add_two": add_two} , state=__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , [3, 5] )
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"x": 3, "test_list": [3, 5]} )
def _A ( self : Any ):
SCREAMING_SNAKE_CASE : List[Any] = "y = x"
SCREAMING_SNAKE_CASE : Tuple = {"x": 3}
SCREAMING_SNAKE_CASE : Union[str, Any] = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE )
assert result == 3
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"x": 3, "y": 3} )
def _A ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE : int = "test_list = [x, add_two(x)]\ntest_list[1]"
SCREAMING_SNAKE_CASE : List[str] = {"x": 3}
SCREAMING_SNAKE_CASE : List[str] = evaluate(__SCREAMING_SNAKE_CASE , {"add_two": add_two} , state=__SCREAMING_SNAKE_CASE )
assert result == 5
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"x": 3, "test_list": [3, 5]} )
SCREAMING_SNAKE_CASE : Optional[int] = "test_dict = {\'x\': x, \'y\': add_two(x)}\ntest_dict[\'y\']"
SCREAMING_SNAKE_CASE : int = {"x": 3}
SCREAMING_SNAKE_CASE : Tuple = evaluate(__SCREAMING_SNAKE_CASE , {"add_two": add_two} , state=__SCREAMING_SNAKE_CASE )
assert result == 5
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"x": 3, "test_dict": {"x": 3, "y": 5}} )
def _A ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE : Any = "x = 0\nfor i in range(3):\n x = i"
SCREAMING_SNAKE_CASE : Optional[Any] = {}
SCREAMING_SNAKE_CASE : Union[str, Any] = evaluate(__SCREAMING_SNAKE_CASE , {"range": range} , state=__SCREAMING_SNAKE_CASE )
assert result == 2
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"x": 2, "i": 2} )
| 62 |
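As a hedged sketch of the interpreter these tests exercise: the real evaluate() also handles if/for blocks and subscript assignment, while this minimal version (all names are my own) covers only top-level assignments and expressions:

import ast

def eval_snippet(code, tools, state):
    # Minimal restricted evaluator sketch: run top-level assignments and
    # expressions, thread `state` through, return the last computed value.
    result = None
    for node in ast.parse(code).body:
        if not isinstance(node, (ast.Assign, ast.Expr)):
            raise ValueError(f"unsupported statement: {ast.dump(node)}")
        expr = ast.fix_missing_locations(ast.Expression(body=node.value))
        value = eval(compile(expr, "<sketch>", "eval"), {"__builtins__": {}}, {**tools, **state})
        if isinstance(node, ast.Assign):
            for target in node.targets:  # plain-name targets only in this sketch
                state[target.id] = value
        result = value
    return result

state = {"x": 3}
assert eval_snippet("y = add_two(x)", {"add_two": lambda v: v + 2}, state) == 5
assert state == {"x": 3, "y": 5}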
'''simple docstring'''
def partition (m : int )-> int:
    '''simple docstring'''
    memo = [[0 for _ in range(m )] for _ in range(m + 1 )]
    for i in range(m + 1 ):
        memo[i][0] = 1
    for n in range(m + 1 ):
        for k in range(1 , m ):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
        n = int(input('''Enter a number: ''').strip())
print(partition(n))
except ValueError:
print('''Please enter a number.''')
else:
try:
            n = int(sys.argv[1])
print(partition(n))
except ValueError:
print('''Please pass a number.''')
| 24 | 0 |
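A quick sanity check of the recurrence above against the known partition numbers p(1)..p(6) = 1, 2, 3, 5, 7, 11:

# p(m) counts the ways to write m as a sum of positive integers (order ignored).
for m, p_m in {1: 1, 2: 2, 3: 3, 4: 5, 5: 7, 6: 11}.items():
    assert partition(m) == p_m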
__lowercase : Any = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
__lowercase : str = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
__lowercase : Optional[Any] = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 36 |
'''simple docstring'''
import argparse
import os
import re
UpperCAmelCase_ : List[str] = '''src/transformers/models/auto'''
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
UpperCAmelCase_ : Tuple = re.compile(R'''[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict''')
# re pattern that matches identifiers in mappings
UpperCAmelCase_ : Dict = re.compile(R'''\s*\(\s*"(\S[^"]+)"''')
def _UpperCamelCase (_lowerCamelCase : List[Any] , _lowerCamelCase : bool = False )-> str:
'''simple docstring'''
with open(_lowerCamelCase , '''r''' , encoding='''utf-8''' ) as f:
__snake_case = f.read()
__snake_case = content.split('''\n''' )
__snake_case = []
__snake_case = 0
while line_idx < len(_lowerCamelCase ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
__snake_case = len(re.search(R'''^(\s*)\S''' , lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(''' ''' * indent + '''(''' ):
new_lines.append(lines[line_idx] )
line_idx += 1
__snake_case = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
__snake_case = line_idx
while not lines[line_idx].startswith(''' ''' * indent + ''')''' ):
line_idx += 1
blocks.append('''\n'''.join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
__snake_case = sorted(_lowerCamelCase , key=lambda _lowerCamelCase : _re_identifier.search(_lowerCamelCase ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(_lowerCamelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write('''\n'''.join(_lowerCamelCase ) )
elif "\n".join(_lowerCamelCase ) != content:
return True
def _UpperCamelCase (_lowerCamelCase : bool = False )-> Tuple:
'''simple docstring'''
    __snake_case = [os.path.join(_lowerCamelCase , f ) for f in os.listdir(_lowerCamelCase ) if f.endswith('''.py''' )]
__snake_case = [sort_auto_mapping(_lowerCamelCase , overwrite=_lowerCamelCase ) for fname in fnames]
if not overwrite and any(_lowerCamelCase ):
__snake_case = [f for f, d in zip(_lowerCamelCase , _lowerCamelCase ) if d]
raise ValueError(
f'''The following files have auto mappings that need sorting: {", ".join(_lowerCamelCase )}. Run `make style` to fix'''
''' this.''' )
if __name__ == "__main__":
UpperCAmelCase_ : str = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
UpperCAmelCase_ : List[Any] = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 24 | 0 |
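The sort key above is the first quoted identifier inside each block; the same regex in isolation:

import re

_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')
blocks = ['        ("roberta", "RobertaConfig"),', '        ("albert", "AlbertConfig"),']
blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
assert [b.strip() for b in blocks] == ['("albert", "AlbertConfig"),', '("roberta", "RobertaConfig"),']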
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__a = logging.getLogger(__name__)
@dataclass
class lowercase__:
"""simple docstring"""
a :str = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
a :Optional[str] = field(
default=__lowerCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
a :Optional[str] = field(
default='NER' , metadata={'help': 'Task type to fine tune in training (e.g. NER, POS, etc)'} )
a :Optional[str] = field(
default=__lowerCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
a :bool = field(default=__lowerCAmelCase , metadata={'help': 'Set this flag to use fast tokenization.'} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
a :Optional[str] = field(
default=__lowerCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class lowercase__:
"""simple docstring"""
a :str = field(
metadata={'help': 'The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'} )
a :Optional[str] = field(
default=__lowerCAmelCase , metadata={'help': 'Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'} , )
a :int = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
a :bool = field(
default=__lowerCAmelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def a ( ):
'''simple docstring'''
lowercase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase_ , lowercase_ , lowercase_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase_ , lowercase_ , lowercase_ = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
''' --overwrite_output_dir to overcome.''' )
lowercase_ = import_module('''tasks''' )
try:
lowercase_ = getattr(_lowerCamelCase , model_args.task_type )
lowercase_ = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F'''Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
F'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , _lowerCamelCase )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
lowercase_ = token_classification_task.get_labels(data_args.labels )
lowercase_ = dict(enumerate(_lowerCamelCase ) )
lowercase_ = len(_lowerCamelCase )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase_ = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_lowerCamelCase , id2label=_lowerCamelCase , label2id={label: i for i, label in enumerate(_lowerCamelCase )} , cache_dir=model_args.cache_dir , )
lowercase_ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
lowercase_ = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_lowerCamelCase , cache_dir=model_args.cache_dir , )
# Get datasets
lowercase_ = (
TokenClassificationDataset(
token_classification_task=_lowerCamelCase , data_dir=data_args.data_dir , tokenizer=_lowerCamelCase , labels=_lowerCamelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
lowercase_ = (
TokenClassificationDataset(
token_classification_task=_lowerCamelCase , data_dir=data_args.data_dir , tokenizer=_lowerCamelCase , labels=_lowerCamelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(snake_case__: np.ndarray , snake_case__: np.ndarray ) -> Tuple[List[int], List[int]]:
lowercase_ = np.argmax(_lowerCamelCase , axis=2 )
lowercase_ , lowercase_ = preds.shape
lowercase_ = [[] for _ in range(_lowerCamelCase )]
lowercase_ = [[] for _ in range(_lowerCamelCase )]
for i in range(_lowerCamelCase ):
for j in range(_lowerCamelCase ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
def compute_metrics(snake_case__: EvalPrediction ) -> Dict:
lowercase_ , lowercase_ = align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(_lowerCamelCase , _lowerCamelCase ),
"precision": precision_score(_lowerCamelCase , _lowerCamelCase ),
"recall": recall_score(_lowerCamelCase , _lowerCamelCase ),
"f1": fa_score(_lowerCamelCase , _lowerCamelCase ),
}
# Data collator
    lowercase_ = DataCollatorWithPadding(_lowerCamelCase , pad_to_multiple_of=8 ) if training_args.fp16 else None
# Initialize our Trainer
lowercase_ = Trainer(
model=_lowerCamelCase , args=_lowerCamelCase , train_dataset=_lowerCamelCase , eval_dataset=_lowerCamelCase , compute_metrics=_lowerCamelCase , data_collator=_lowerCamelCase , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
lowercase_ = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowercase_ = trainer.evaluate()
lowercase_ = os.path.join(training_args.output_dir , '''eval_results.txt''' )
if trainer.is_world_process_zero():
with open(_lowerCamelCase , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(''' %s = %s''' , _lowerCamelCase , _lowerCamelCase )
writer.write('''%s = %s\n''' % (key, value) )
results.update(_lowerCamelCase )
# Predict
if training_args.do_predict:
lowercase_ = TokenClassificationDataset(
token_classification_task=_lowerCamelCase , data_dir=data_args.data_dir , tokenizer=_lowerCamelCase , labels=_lowerCamelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
lowercase_ , lowercase_ , lowercase_ = trainer.predict(_lowerCamelCase )
lowercase_ , lowercase_ = align_predictions(_lowerCamelCase , _lowerCamelCase )
lowercase_ = os.path.join(training_args.output_dir , '''test_results.txt''' )
if trainer.is_world_process_zero():
with open(_lowerCamelCase , '''w''' ) as writer:
for key, value in metrics.items():
logger.info(''' %s = %s''' , _lowerCamelCase , _lowerCamelCase )
writer.write('''%s = %s\n''' % (key, value) )
# Save predictions
lowercase_ = os.path.join(training_args.output_dir , '''test_predictions.txt''' )
if trainer.is_world_process_zero():
with open(_lowerCamelCase , '''w''' ) as writer:
with open(os.path.join(data_args.data_dir , '''test.txt''' ) , '''r''' ) as f:
token_classification_task.write_predictions_to_file(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
return results
def a ( snake_case__: Tuple ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 97 |
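The masked align_predictions closure above is the heart of token-classification evaluation; an unmasked sketch of the same logic, with a toy label_map and IGNORE_INDEX standing in for nn.CrossEntropyLoss().ignore_index:

import numpy as np

IGNORE_INDEX = -100  # value of nn.CrossEntropyLoss().ignore_index
label_map = {0: "O", 1: "B-PER"}  # toy mapping for illustration

def align_predictions(predictions, label_ids):
    # Argmax over the label axis, then drop the special positions (sub-word
    # pieces, padding) that were labelled with the loss's ignore_index.
    preds = np.argmax(predictions, axis=2)
    batch_size, seq_len = preds.shape
    out_label_list = [[] for _ in range(batch_size)]
    preds_list = [[] for _ in range(batch_size)]
    for i in range(batch_size):
        for j in range(seq_len):
            if label_ids[i, j] != IGNORE_INDEX:
                out_label_list[i].append(label_map[label_ids[i][j]])
                preds_list[i].append(label_map[preds[i][j]])
    return preds_list, out_label_list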
'''simple docstring'''
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def _UpperCamelCase (*_lowerCamelCase : str , _lowerCamelCase : Optional[Union[Dict, Any]] = None , _lowerCamelCase : List[Any]=True , _lowerCamelCase : str=2 )-> str:
'''simple docstring'''
from .. import __version__
__snake_case = take_from
__snake_case = ()
if not isinstance(args[0] , _lowerCamelCase ):
__snake_case = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(_lowerCamelCase ).base_version ) >= version.parse(_lowerCamelCase ):
raise ValueError(
f'''The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''''
f''' version {__version__} is >= {version_name}''' )
__snake_case = None
if isinstance(_lowerCamelCase , _lowerCamelCase ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(_lowerCamelCase ),)
__snake_case = f'''The `{attribute}` argument is deprecated and will be removed in version {version_name}.'''
elif hasattr(_lowerCamelCase , _lowerCamelCase ):
values += (getattr(_lowerCamelCase , _lowerCamelCase ),)
__snake_case = f'''The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'''
elif deprecated_kwargs is None:
__snake_case = f'''`{attribute}` is deprecated and will be removed in version {version_name}.'''
if warning is not None:
__snake_case = warning + ''' ''' if standard_warn else ''''''
warnings.warn(warning + message , _lowerCamelCase , stacklevel=_lowerCamelCase )
if isinstance(_lowerCamelCase , _lowerCamelCase ) and len(_lowerCamelCase ) > 0:
__snake_case = inspect.getouterframes(inspect.currentframe() )[1]
__snake_case = call_frame.filename
__snake_case = call_frame.lineno
__snake_case = call_frame.function
__snake_case , __snake_case = next(iter(deprecated_kwargs.items() ) )
raise TypeError(f'''{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`''' )
if len(_lowerCamelCase ) == 0:
return
elif len(_lowerCamelCase ) == 1:
return values[0]
return values
| 24 | 0 |
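The error path above recovers the caller's file, line, and function from the stack via inspect.getouterframes; the same trick in isolation (the function name is my own):

import inspect

def caller_location():
    # Frame [0] is this function; frame [1] is whoever called it.
    frame = inspect.getouterframes(inspect.currentframe())[1]
    return frame.filename, frame.lineno, frame.function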
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self, snake_case__ ) -> str:
"""simple docstring"""
lowercase_ : Tuple = n
lowercase_ : Optional[Any] = [None] * self.n
lowercase_ : Any = 0 # index of the first element
lowercase_ : List[str] = 0
lowercase_ : Tuple = 0
def __len__( self ) -> int:
"""simple docstring"""
return self.size
def snake_case__ ( self ) -> bool:
"""simple docstring"""
return self.size == 0
def snake_case__ ( self ) -> Optional[Any]:
"""simple docstring"""
return False if self.is_empty() else self.array[self.front]
def snake_case__ ( self, snake_case__ ) -> Tuple:
"""simple docstring"""
if self.size >= self.n:
raise Exception("""QUEUE IS FULL""" )
lowercase_ : Any = data
lowercase_ : int = (self.rear + 1) % self.n
self.size += 1
return self
def snake_case__ ( self ) -> List[str]:
"""simple docstring"""
if self.size == 0:
raise Exception("""UNDERFLOW""" )
lowercase_ : Union[str, Any] = self.array[self.front]
lowercase_ : Optional[Any] = None
lowercase_ : Optional[Any] = (self.front + 1) % self.n
self.size -= 1
        return temp
| 458 |
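The ring buffer above relies only on modular index arithmetic; traced by hand for capacity 3:

n = 3
front, rear, size = 0, 0, 0
for _ in range(3):        # three enqueues fill the buffer; rear wraps 1, 2, 0
    rear = (rear + 1) % n
    size += 1
assert (front, rear, size) == (0, 0, 3)   # full: size == n and front == rear
front = (front + 1) % n   # one dequeue frees slot 0
size -= 1
assert (front, rear, size) == (1, 0, 2)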
'''simple docstring'''
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def _UpperCamelCase (_lowerCamelCase : int , _lowerCamelCase : str )-> List[str]:
'''simple docstring'''
__snake_case = old_name
if "patch_embed" in old_name:
__snake_case , __snake_case , __snake_case = old_name.split('''.''' )
if layer == "0":
__snake_case = old_name.replace('''0''' , '''convolution1''' )
elif layer == "1":
__snake_case = old_name.replace('''1''' , '''batchnorm_before''' )
elif layer == "3":
__snake_case = old_name.replace('''3''' , '''convolution2''' )
else:
__snake_case = old_name.replace('''4''' , '''batchnorm_after''' )
if "network" in old_name and re.search(R'''\d\.\d''' , _lowerCamelCase ):
__snake_case = R'''\b\d{2}\b'''
if bool(re.search(_lowerCamelCase , _lowerCamelCase ) ):
__snake_case = re.search(R'''\d\.\d\d.''' , _lowerCamelCase ).group()
else:
__snake_case = re.search(R'''\d\.\d.''' , _lowerCamelCase ).group()
if int(match[0] ) < 6:
__snake_case = old_name.replace(_lowerCamelCase , '''''' )
__snake_case = trimmed_name.replace('''network''' , match[0] + '''.meta4D_layers.blocks.''' + match[2:-1] )
__snake_case = '''intermediate_stages.''' + trimmed_name
else:
__snake_case = old_name.replace(_lowerCamelCase , '''''' )
if int(match[2] ) < num_meta4D_last_stage:
__snake_case = trimmed_name.replace('''network''' , '''meta4D_layers.blocks.''' + match[2] )
else:
__snake_case = str(int(match[2] ) - num_meta4D_last_stage )
__snake_case = trimmed_name.replace('''network''' , '''meta3D_layers.blocks.''' + layer_index )
if "norm1" in old_name:
__snake_case = trimmed_name.replace('''norm1''' , '''layernorm1''' )
elif "norm2" in old_name:
__snake_case = trimmed_name.replace('''norm2''' , '''layernorm2''' )
elif "fc1" in old_name:
__snake_case = trimmed_name.replace('''fc1''' , '''linear_in''' )
elif "fc2" in old_name:
__snake_case = trimmed_name.replace('''fc2''' , '''linear_out''' )
__snake_case = '''last_stage.''' + trimmed_name
elif "network" in old_name and re.search(R'''.\d.''' , _lowerCamelCase ):
__snake_case = old_name.replace('''network''' , '''intermediate_stages''' )
if "fc" in new_name:
__snake_case = new_name.replace('''fc''' , '''convolution''' )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
__snake_case = new_name.replace('''norm1''' , '''batchnorm_before''' )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
__snake_case = new_name.replace('''norm2''' , '''batchnorm_after''' )
if "proj" in new_name:
__snake_case = new_name.replace('''proj''' , '''projection''' )
if "dist_head" in new_name:
__snake_case = new_name.replace('''dist_head''' , '''distillation_classifier''' )
elif "head" in new_name:
__snake_case = new_name.replace('''head''' , '''classifier''' )
elif "patch_embed" in new_name:
__snake_case = '''efficientformer.''' + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
__snake_case = new_name.replace('''norm''' , '''layernorm''' )
__snake_case = '''efficientformer.''' + new_name
else:
__snake_case = '''efficientformer.encoder.''' + new_name
return new_name
def _UpperCamelCase (_lowerCamelCase : str , _lowerCamelCase : Tuple )-> List[str]:
'''simple docstring'''
for key in checkpoint.copy().keys():
__snake_case = checkpoint.pop(_lowerCamelCase )
__snake_case = val
return checkpoint
def _UpperCamelCase ()-> Tuple:
'''simple docstring'''
__snake_case = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__snake_case = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return image
def _UpperCamelCase (_lowerCamelCase : Path , _lowerCamelCase : Path , _lowerCamelCase : Path , _lowerCamelCase : bool )-> Optional[Any]:
'''simple docstring'''
__snake_case = torch.load(_lowerCamelCase , map_location='''cpu''' )['''model''']
__snake_case = EfficientFormerConfig.from_json_file(_lowerCamelCase )
__snake_case = EfficientFormerForImageClassificationWithTeacher(_lowerCamelCase )
__snake_case = '''_'''.join(checkpoint_path.split('''/''' )[-1].split('''.''' )[0].split('''_''' )[:-1] )
    __snake_case = config.depths[-1] - config.num_meta3d_blocks + 1
__snake_case = convert_torch_checkpoint(_lowerCamelCase , _lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
model.eval()
__snake_case = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
# prepare image
__snake_case = prepare_img()
__snake_case = 2_56
__snake_case = 2_24
__snake_case = EfficientFormerImageProcessor(
size={'''shortest_edge''': image_size} , crop_size={'''height''': crop_size, '''width''': crop_size} , resample=pillow_resamplings['''bicubic'''] , )
__snake_case = processor(images=_lowerCamelCase , return_tensors='''pt''' ).pixel_values
# original processing pipeline
__snake_case = Compose(
[
Resize(_lowerCamelCase , interpolation=pillow_resamplings['''bicubic'''] ),
CenterCrop(_lowerCamelCase ),
ToTensor(),
Normalize(_lowerCamelCase , _lowerCamelCase ),
] )
__snake_case = image_transforms(_lowerCamelCase ).unsqueeze(0 )
assert torch.allclose(_lowerCamelCase , _lowerCamelCase )
__snake_case = model(_lowerCamelCase )
__snake_case = outputs.logits
__snake_case = (1, 10_00)
if "l1" in model_name:
__snake_case = torch.Tensor(
[-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
assert torch.allclose(logits[0, :10] , _lowerCamelCase , atol=1E-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
__snake_case = torch.Tensor(
[-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
assert torch.allclose(logits[0, :10] , _lowerCamelCase , atol=1E-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
__snake_case = torch.Tensor(
[-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
assert logits.shape == expected_shape
else:
raise ValueError(
f'''Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7''' )
# Save Checkpoints
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
model.save_pretrained(_lowerCamelCase )
print(f'''Checkpoint successfuly converted. Model saved at {pytorch_dump_path}''' )
processor.save_pretrained(_lowerCamelCase )
print(f'''Processor successfuly saved at {pytorch_dump_path}''' )
if push_to_hub:
print('''Pushing model to the hub...''' )
model.push_to_hub(
repo_id=f'''Bearnardd/{pytorch_dump_path}''' , commit_message='''Add model''' , use_temp_dir=_lowerCamelCase , )
processor.push_to_hub(
repo_id=f'''Bearnardd/{pytorch_dump_path}''' , commit_message='''Add image processor''' , use_temp_dir=_lowerCamelCase , )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''',
default=None,
type=str,
required=True,
help='''Path to EfficientFormer pytorch checkpoint.''',
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for EfficientFormer model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
parser.set_defaults(push_to_hub=True)
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 24 | 0 |
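The key-renaming loop above reduces to a generic pop-and-reinsert pattern common to checkpoint-conversion scripts (the helper name is my own):

def convert_state_dict(checkpoint: dict, rename_fn) -> dict:
    # Pop every key and re-insert its tensor under the renamed key.
    for key in list(checkpoint.keys()):
        checkpoint[rename_fn(key)] = checkpoint.pop(key)
    return checkpoint

assert convert_state_dict({"head.weight": 1}, lambda k: k.replace("head", "classifier")) == {"classifier.weight": 1}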
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class lowercase_ ( __lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase : List[str] = XGLMTokenizer
UpperCAmelCase : Union[str, Any] = XGLMTokenizerFast
UpperCAmelCase : Optional[int] = True
UpperCAmelCase : List[str] = True
def lowerCAmelCase_ ( self : List[str] ):
super().setUp()
# We have a SentencePiece fixture for testing
_A = XGLMTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase_ ( self : Optional[Any] ):
_A = '<pad>'
_A = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase_ ( self : Tuple ):
_A = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 1_008 )
def lowerCAmelCase_ ( self : Optional[Any] ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_008 )
def lowerCAmelCase_ ( self : List[Any] ):
_A = XGLMTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE )
_A = tokenizer.tokenize('This is a test' )
self.assertListEqual(__SCREAMING_SNAKE_CASE , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
_A = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
_A = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
_A = tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def lowerCAmelCase_ ( self : List[str] ):
return XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
def lowerCAmelCase_ ( self : str ):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(__SCREAMING_SNAKE_CASE , f.name )
_A = XGLMTokenizer(f.name , keep_accents=__SCREAMING_SNAKE_CASE )
_A = pickle.dumps(__SCREAMING_SNAKE_CASE )
pickle.loads(__SCREAMING_SNAKE_CASE )
def lowerCAmelCase_ ( self : str ):
if not self.test_rust_tokenizer:
return
_A = self.get_tokenizer()
_A = self.get_rust_tokenizer()
_A = 'I was born in 92000, and this is falsé.'
_A = tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
_A = rust_tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
_A = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
_A = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
_A = self.get_rust_tokenizer()
_A = tokenizer.encode(__SCREAMING_SNAKE_CASE )
_A = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@slow
def lowerCAmelCase_ ( self : Optional[int] ):
_A = 'Hello World!'
_A = [2, 31_227, 4_447, 35]
self.assertListEqual(__SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(__SCREAMING_SNAKE_CASE ) )
@slow
def lowerCAmelCase_ ( self : List[Any] ):
_A = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'
)
# fmt: off
_A = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(__SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(__SCREAMING_SNAKE_CASE ) )
@slow
def lowerCAmelCase_ ( self : Optional[int] ):
_A = {
'input_ids': [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]],
'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__SCREAMING_SNAKE_CASE , model_name='facebook/xglm-564M' , padding=__SCREAMING_SNAKE_CASE , )
| 7 |
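The encode checks above add tokenizer.fairseq_offset to raw SentencePiece ids; the offset reserves the lowest ids for control tokens such as <s> and <pad>. Schematically, assuming an offset of 1 purely for illustration:

fairseq_offset = 1                  # illustrative value, not necessarily XGLM's
sp_ids = [285, 46, 10, 170, 382]    # raw SentencePiece ids for "▁This ▁is ▁a ▁t est"
hf_ids = [i + fairseq_offset for i in sp_ids]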
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class lowerCAmelCase ( unittest.TestCase):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=18 , __SCREAMING_SNAKE_CASE=30 , __SCREAMING_SNAKE_CASE=400 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , ) -> Tuple:
'''simple docstring'''
__snake_case = size if size is not None else {'''shortest_edge''': 20}
__snake_case = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
__snake_case = parent
__snake_case = batch_size
__snake_case = num_channels
__snake_case = image_size
__snake_case = min_resolution
__snake_case = max_resolution
__snake_case = do_resize
__snake_case = size
__snake_case = do_center_crop
__snake_case = crop_size
__snake_case = do_flip_channel_order
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class lowerCAmelCase ( __lowerCAmelCase , unittest.TestCase):
__lowercase : Union[str, Any] = MobileViTImageProcessor if is_vision_available() else None
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case = MobileViTImageProcessingTester(self )
@property
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_resize''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''size''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_center_crop''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''center_crop''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_flip_channel_order''' ) )
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
__snake_case = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
__snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__snake_case = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
__snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__snake_case = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
__snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__snake_case = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 24 | 0 |
'''simple docstring'''
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def UpperCAmelCase_ ( A , A ):
'''simple docstring'''
_a : Any = old_name
if "patch_embed" in old_name:
_a , _a , _a : List[Any] = old_name.split('.' )
if layer == "0":
_a : Tuple = old_name.replace('0' , 'convolution1' )
elif layer == "1":
_a : Optional[int] = old_name.replace('1' , 'batchnorm_before' )
elif layer == "3":
_a : Dict = old_name.replace('3' , 'convolution2' )
else:
_a : Union[str, Any] = old_name.replace('4' , 'batchnorm_after' )
if "network" in old_name and re.search(r'\d\.\d' , _lowerCamelCase ):
_a : List[Any] = r'\b\d{2}\b'
if bool(re.search(_lowerCamelCase , _lowerCamelCase ) ):
_a : Dict = re.search(r'\d\.\d\d.' , _lowerCamelCase ).group()
else:
_a : List[str] = re.search(r'\d\.\d.' , _lowerCamelCase ).group()
if int(match[0] ) < 6:
_a : Tuple = old_name.replace(_lowerCamelCase , '' )
_a : Union[str, Any] = trimmed_name.replace('network' , match[0] + '.meta4D_layers.blocks.' + match[2:-1] )
_a : List[str] = 'intermediate_stages.' + trimmed_name
else:
_a : str = old_name.replace(_lowerCamelCase , '' )
if int(match[2] ) < num_meta4D_last_stage:
_a : Union[str, Any] = trimmed_name.replace('network' , 'meta4D_layers.blocks.' + match[2] )
else:
_a : int = str(int(match[2] ) - num_meta4D_last_stage )
_a : int = trimmed_name.replace('network' , 'meta3D_layers.blocks.' + layer_index )
if "norm1" in old_name:
_a : List[Any] = trimmed_name.replace('norm1' , 'layernorm1' )
elif "norm2" in old_name:
_a : Union[str, Any] = trimmed_name.replace('norm2' , 'layernorm2' )
elif "fc1" in old_name:
_a : List[Any] = trimmed_name.replace('fc1' , 'linear_in' )
elif "fc2" in old_name:
_a : Optional[int] = trimmed_name.replace('fc2' , 'linear_out' )
_a : Dict = 'last_stage.' + trimmed_name
elif "network" in old_name and re.search(r'.\d.' , _lowerCamelCase ):
_a : List[Any] = old_name.replace('network' , 'intermediate_stages' )
if "fc" in new_name:
_a : Any = new_name.replace('fc' , 'convolution' )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
_a : Tuple = new_name.replace('norm1' , 'batchnorm_before' )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
_a : Any = new_name.replace('norm2' , 'batchnorm_after' )
if "proj" in new_name:
_a : str = new_name.replace('proj' , 'projection' )
if "dist_head" in new_name:
_a : Dict = new_name.replace('dist_head' , 'distillation_classifier' )
elif "head" in new_name:
_a : Union[str, Any] = new_name.replace('head' , 'classifier' )
elif "patch_embed" in new_name:
_a : List[str] = 'efficientformer.' + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
_a : int = new_name.replace('norm' , 'layernorm' )
_a : Optional[int] = 'efficientformer.' + new_name
else:
_a : Tuple = 'efficientformer.encoder.' + new_name
return new_name
def UpperCAmelCase_ ( A , A ):
'''simple docstring'''
for key in checkpoint.copy().keys():
_a : Optional[int] = checkpoint.pop(_lowerCamelCase )
_a : Optional[Any] = val
return checkpoint
def UpperCAmelCase_ ( ):
'''simple docstring'''
_a : Union[str, Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_a : str = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return image
def UpperCAmelCase_ ( A , A , A , A ):
'''simple docstring'''
_a : List[str] = torch.load(_lowerCamelCase , map_location='cpu' )['model']
_a : Dict = EfficientFormerConfig.from_json_file(_lowerCamelCase )
_a : Union[str, Any] = EfficientFormerForImageClassificationWithTeacher(_lowerCamelCase )
_a : List[Any] = '_'.join(checkpoint_path.split('/' )[-1].split('.' )[0].split('_' )[:-1] )
_a : List[Any] = config.depths[-1] - config.num_metaad_blocks + 1
_a : Tuple = convert_torch_checkpoint(_lowerCamelCase , _lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
model.eval()
_a : Tuple = {
'bilinear': PILImageResampling.BILINEAR,
'bicubic': PILImageResampling.BICUBIC,
'nearest': PILImageResampling.NEAREST,
}
# prepare image
_a : str = prepare_img()
_a : Optional[Any] = 2_5_6
_a : Union[str, Any] = 2_2_4
_a : List[str] = EfficientFormerImageProcessor(
size={'shortest_edge': image_size} , crop_size={'height': crop_size, 'width': crop_size} , resample=pillow_resamplings['bicubic'] , )
_a : str = processor(images=_lowerCamelCase , return_tensors='pt' ).pixel_values
# original processing pipeline
_a : List[Any] = Compose(
[
Resize(_lowerCamelCase , interpolation=pillow_resamplings['bicubic'] ),
CenterCrop(_lowerCamelCase ),
ToTensor(),
Normalize(_lowerCamelCase , _lowerCamelCase ),
] )
_a : Tuple = image_transforms(_lowerCamelCase ).unsqueeze(0 )
assert torch.allclose(_lowerCamelCase , _lowerCamelCase )
_a : List[Any] = model(_lowerCamelCase )
_a : Optional[Any] = outputs.logits
_a : Optional[int] = (1, 1_0_0_0)
if "l1" in model_name:
_a : str = torch.Tensor(
[-0.13_12, 0.43_53, -1.04_99, -0.51_24, 0.41_83, -0.67_93, -1.37_77, -0.08_93, -0.73_58, -2.43_28] )
assert torch.allclose(logits[0, :1_0] , _lowerCamelCase , atol=1E-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
_a : Any = torch.Tensor(
[-1.31_50, -1.54_56, -1.25_56, -0.84_96, -0.71_27, -0.78_97, -0.97_28, -0.30_52, 0.37_51, -0.31_27] )
assert torch.allclose(logits[0, :1_0] , _lowerCamelCase , atol=1E-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
_a : Optional[Any] = torch.Tensor(
[-1.02_83, -1.41_31, -0.56_44, -1.31_15, -0.57_85, -1.20_49, -0.75_28, 0.19_92, -0.38_22, -0.08_78] )
assert logits.shape == expected_shape
else:
raise ValueError(
            f'''Unknown model checkpoint: {checkpoint_path}. Supported versions of EfficientFormer are l1, l3 and l7''' )
# Save Checkpoints
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
model.save_pretrained(_lowerCamelCase )
    print(f'''Checkpoint successfully converted. Model saved at {pytorch_dump_path}''' )
processor.save_pretrained(_lowerCamelCase )
    print(f'''Processor successfully saved at {pytorch_dump_path}''' )
if push_to_hub:
print('Pushing model to the hub...' )
model.push_to_hub(
repo_id=f'''Bearnardd/{pytorch_dump_path}''' , commit_message='Add model' , use_temp_dir=_lowerCamelCase , )
processor.push_to_hub(
repo_id=f'''Bearnardd/{pytorch_dump_path}''' , commit_message='Add image processor' , use_temp_dir=_lowerCamelCase , )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path",
default=None,
type=str,
required=True,
help="Path to EfficientFormer pytorch checkpoint.",
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for EfficientFormer model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
parser.set_defaults(push_to_hub=True)
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
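    # Illustrative invocation (the script name and all paths below are hypothetical placeholders):
    #   python convert_efficientformer_checkpoint.py \
    #       --pytorch_model_path efficientformer_l1.pth \
    #       --config_file efficientformer_l1_config.json \
    #       --pytorch_dump_path efficientformer-l1-300 --push_to_hub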
| 120 |
'''simple docstring'''
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = "arrow" , **__SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(
split=__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE , streaming=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
__snake_case = load_from_cache_file
__snake_case = file_format
__snake_case = Spark(
df=__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , working_dir=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
__snake_case = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=__SCREAMING_SNAKE_CASE , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
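    # Note: with `streaming=True` the method above returns an iterable dataset without
    # materializing anything; otherwise it runs `download_and_prepare` once (forcing a
    # re-download only when `load_from_cache_file=False`) and returns the finished split.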
| 24 | 0 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent that you should run this script from the root of the repo with the command
# python utils/check_copies.py
A_ = '''src/diffusers'''
A_ = '''.'''
# This is to make sure the diffusers module imported is the one in the repo.
A_ = importlib.util.spec_from_file_location(
"diffusers",
os.path.join(DIFFUSERS_PATH, "__init__.py"),
submodule_search_locations=[DIFFUSERS_PATH],
)
A_ = spec.loader.load_module()
def UpperCAmelCase ( UpperCAmelCase ,UpperCAmelCase )-> Optional[Any]:
'''simple docstring'''
return line.startswith(_lowerCamelCase ) or len(_lowerCamelCase ) <= 1 or re.search(R'''^\s*\)(\s*->.*:|:)\s*$''' ,_lowerCamelCase ) is not None
def UpperCAmelCase ( UpperCAmelCase )-> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = object_name.split('''.''' )
SCREAMING_SNAKE_CASE_ = 0
# First let's find the module where our object lives.
SCREAMING_SNAKE_CASE_ = parts[i]
while i < len(_lowerCamelCase ) and not os.path.isfile(os.path.join(_lowerCamelCase ,f'''{module}.py''' ) ):
i += 1
if i < len(_lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = os.path.join(_lowerCamelCase ,parts[i] )
if i >= len(_lowerCamelCase ):
raise ValueError(f'''`object_name` should begin with the name of a module of diffusers but got {object_name}.''' )
with open(os.path.join(_lowerCamelCase ,f'''{module}.py''' ) ,'''r''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
SCREAMING_SNAKE_CASE_ = f.readlines()
# Now let's find the class / func in the code!
SCREAMING_SNAKE_CASE_ = ''''''
SCREAMING_SNAKE_CASE_ = 0
for name in parts[i + 1 :]:
while (
line_index < len(_lowerCamelCase ) and re.search(Rf'''^{indent}(class|def)\s+{name}(\(|\:)''' ,lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(_lowerCamelCase ):
raise ValueError(f''' {object_name} does not match any function or class in {module}.''' )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
SCREAMING_SNAKE_CASE_ = line_index
while line_index < len(_lowerCamelCase ) and _should_continue(lines[line_index] ,_lowerCamelCase ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
SCREAMING_SNAKE_CASE_ = lines[start_index:line_index]
return "".join(_lowerCamelCase )
A_ = re.compile(R"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
A_ = re.compile(R"^\s*(\S+)->(\S+)(\s+.*|$)")
A_ = re.compile(R"<FILL\s+[^>]*>")
def UpperCAmelCase ( UpperCAmelCase )-> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = code.split('''\n''' )
SCREAMING_SNAKE_CASE_ = 0
while idx < len(_lowerCamelCase ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(_lowerCamelCase ):
return re.search(R'''^(\s*)\S''' ,lines[idx] ).groups()[0]
return ""
def UpperCAmelCase ( UpperCAmelCase )-> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = len(get_indent(_lowerCamelCase ) ) > 0
if has_indent:
SCREAMING_SNAKE_CASE_ = f'''class Bla:\n{code}'''
SCREAMING_SNAKE_CASE_ = black.Mode(target_versions={black.TargetVersion.PYaa} ,line_length=119 ,preview=_lowerCamelCase )
SCREAMING_SNAKE_CASE_ = black.format_str(_lowerCamelCase ,mode=_lowerCamelCase )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = style_docstrings_in_code(_lowerCamelCase )
return result[len('''class Bla:\n''' ) :] if has_indent else result
def UpperCAmelCase ( UpperCAmelCase ,UpperCAmelCase=False )-> Tuple:
'''simple docstring'''
with open(_lowerCamelCase ,'''r''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
SCREAMING_SNAKE_CASE_ = f.readlines()
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = 0
    # Not a for loop because `lines` is going to change (if `overwrite=True`).
while line_index < len(_lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = search.groups()
SCREAMING_SNAKE_CASE_ = find_code_in_diffusers(_lowerCamelCase )
SCREAMING_SNAKE_CASE_ = get_indent(_lowerCamelCase )
SCREAMING_SNAKE_CASE_ = line_index + 1 if indent == theoretical_indent else line_index + 2
SCREAMING_SNAKE_CASE_ = theoretical_indent
SCREAMING_SNAKE_CASE_ = start_index
        # Loop to check the observed code; stop when indentation diminishes or when we see an `# End copy` comment.
SCREAMING_SNAKE_CASE_ = True
while line_index < len(_lowerCamelCase ) and should_continue:
line_index += 1
if line_index >= len(_lowerCamelCase ):
break
SCREAMING_SNAKE_CASE_ = lines[line_index]
SCREAMING_SNAKE_CASE_ = _should_continue(_lowerCamelCase ,_lowerCamelCase ) and re.search(f'''^{indent}# End copy''' ,_lowerCamelCase ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
SCREAMING_SNAKE_CASE_ = lines[start_index:line_index]
SCREAMING_SNAKE_CASE_ = ''''''.join(_lowerCamelCase )
# Remove any nested `Copied from` comments to avoid circular copies
SCREAMING_SNAKE_CASE_ = [line for line in theoretical_code.split('''\n''' ) if _re_copy_warning.search(_lowerCamelCase ) is None]
SCREAMING_SNAKE_CASE_ = '''\n'''.join(_lowerCamelCase )
# Before comparing, use the `replace_pattern` on the original code.
if len(_lowerCamelCase ) > 0:
SCREAMING_SNAKE_CASE_ = replace_pattern.replace('''with''' ,'''''' ).split(''',''' )
SCREAMING_SNAKE_CASE_ = [_re_replace_pattern.search(_lowerCamelCase ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = pattern.groups()
SCREAMING_SNAKE_CASE_ = re.sub(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
if option.strip() == "all-casing":
SCREAMING_SNAKE_CASE_ = re.sub(obja.lower() ,obja.lower() ,_lowerCamelCase )
SCREAMING_SNAKE_CASE_ = re.sub(obja.upper() ,obja.upper() ,_lowerCamelCase )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
SCREAMING_SNAKE_CASE_ = blackify(lines[start_index - 1] + theoretical_code )
SCREAMING_SNAKE_CASE_ = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
SCREAMING_SNAKE_CASE_ = lines[:start_index] + [theoretical_code] + lines[line_index:]
SCREAMING_SNAKE_CASE_ = start_index + 1
if overwrite and len(_lowerCamelCase ) > 0:
# Warn the user a file has been modified.
print(f'''Detected changes, rewriting {filename}.''' )
with open(_lowerCamelCase ,'''w''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
f.writelines(_lowerCamelCase )
return diffs
def UpperCAmelCase ( UpperCAmelCase = False )-> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = glob.glob(os.path.join(_lowerCamelCase ,'''**/*.py''' ) ,recursive=_lowerCamelCase )
SCREAMING_SNAKE_CASE_ = []
for filename in all_files:
SCREAMING_SNAKE_CASE_ = is_copy_consistent(_lowerCamelCase ,_lowerCamelCase )
diffs += [f'''- {filename}: copy does not match {d[0]} at line {d[1]}''' for d in new_diffs]
if not overwrite and len(_lowerCamelCase ) > 0:
SCREAMING_SNAKE_CASE_ = '''\n'''.join(_lowerCamelCase )
raise Exception(
'''Found the following copy inconsistencies:\n'''
+ diff
+ '''\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.''' )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
A_ = parser.parse_args()
check_copies(args.fix_and_overwrite)
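    # Illustrative invocations (run from the repo root, as noted at the top of this file):
    #   python utils/check_copies.py                      -> report copy inconsistencies
    #   python utils/check_copies.py --fix_and_overwrite  -> rewrite stale copies in place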
| 393 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : str = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCAmelCase_ : str = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
UpperCAmelCase_ : Union[str, Any] = {
'''allenai/led-base-16384''': 1_6_3_8_4,
}
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : Dict = VOCAB_FILES_NAMES
__lowercase : Dict = PRETRAINED_VOCAB_FILES_MAP
__lowercase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : Union[str, Any] = LEDTokenizer
__lowercase : int = ['''input_ids''', '''attention_mask''']
def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="replace" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="<mask>" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , **__SCREAMING_SNAKE_CASE , ) -> List[Any]:
'''simple docstring'''
super().__init__(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , errors=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
__snake_case = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , __SCREAMING_SNAKE_CASE ) != add_prefix_space:
__snake_case = getattr(__SCREAMING_SNAKE_CASE , pre_tok_state.pop('''type''' ) )
__snake_case = add_prefix_space
__snake_case = pre_tok_class(**__SCREAMING_SNAKE_CASE )
__snake_case = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
__snake_case = '''post_processor'''
__snake_case = getattr(self.backend_tokenizer , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if tokenizer_component_instance:
__snake_case = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
__snake_case = tuple(state['''sep'''] )
if "cls" in state:
__snake_case = tuple(state['''cls'''] )
__snake_case = False
if state.get('''add_prefix_space''' , __SCREAMING_SNAKE_CASE ) != add_prefix_space:
__snake_case = add_prefix_space
__snake_case = True
if state.get('''trim_offsets''' , __SCREAMING_SNAKE_CASE ) != trim_offsets:
__snake_case = trim_offsets
__snake_case = True
if changes_to_apply:
__snake_case = getattr(__SCREAMING_SNAKE_CASE , state.pop('''type''' ) )
__snake_case = component_class(**__SCREAMING_SNAKE_CASE )
setattr(self.backend_tokenizer , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
__snake_case = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else value
__snake_case = value
def lowerCAmelCase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> BatchEncoding:
'''simple docstring'''
__snake_case = kwargs.get('''is_split_into_words''' , __SCREAMING_SNAKE_CASE )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> BatchEncoding:
'''simple docstring'''
__snake_case = kwargs.get('''is_split_into_words''' , __SCREAMING_SNAKE_CASE )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
'''simple docstring'''
__snake_case = self._tokenizer.model.save(__SCREAMING_SNAKE_CASE , name=__SCREAMING_SNAKE_CASE )
return tuple(__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) -> List[Any]:
'''simple docstring'''
__snake_case = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) -> List[int]:
'''simple docstring'''
__snake_case = [self.sep_token_id]
__snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
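        # Note: LED, like BART, does not make use of token type ids, so the mask is all zeros.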
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = PaddingStrategy.DO_NOT_PAD , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , ) -> dict:
'''simple docstring'''
__snake_case = super()._pad(
encoded_inputs=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding_strategy=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , )
# Load from model defaults
if return_attention_mask is None:
__snake_case = '''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
__snake_case = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
__snake_case = len(encoded_inputs['''global_attention_mask'''] ) != len(__SCREAMING_SNAKE_CASE )
if needs_to_be_padded:
__snake_case = len(__SCREAMING_SNAKE_CASE ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
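                    # e.g. a right-padded [0, 1, 0] at target length 5 becomes [0, 1, 0, -1, -1]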
__snake_case = (
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
__snake_case = [-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
| 24 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase_ = {
'''configuration_m2m_100''': ['''M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''M2M100Config''', '''M2M100OnnxConfig'''],
'''tokenization_m2m_100''': ['''M2M100Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'''M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''M2M100ForConditionalGeneration''',
'''M2M100Model''',
'''M2M100PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 552 |
'''simple docstring'''
from collections import deque
def tarjan(g: list[list[int]]) -> list[list[int]]:
    '''Return the strongly connected components of a directed graph given as an adjacency list (Tarjan's algorithm, O(V + E)).'''
    n = len(g)
    stack: deque = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]
    def strong_connect(v: int, index: int, components: list[list[int]]) -> int:
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True
        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index
    components: list[list[int]] = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)
    return components
def create_graph(n: int, edges: list[tuple[int, int]]) -> list[list[int]]:
    '''Build an adjacency list for ``n`` vertices from a list of directed (u, v) edges.'''
    g: list[list[int]] = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g
if __name__ == "__main__":
    # Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)
    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
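    # A second, minimal sanity check (edge list chosen here purely for illustration):
    # a 3-cycle collapses into a single strongly connected component.
    assert tarjan(create_graph(3, [(0, 1), (1, 2), (2, 0)])) == [[2, 1, 0]]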
| 24 | 0 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class __lowercase ( __lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : int = BarthezTokenizer
UpperCamelCase : Any = BarthezTokenizerFast
UpperCamelCase : Dict = True
UpperCamelCase : Optional[int] = True
def __A ( self ) -> Dict:
'''simple docstring'''
super().setUp()
lowerCamelCase = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""" )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=__SCREAMING_SNAKE_CASE )
lowerCamelCase = tokenizer
def __A ( self ) -> Dict:
'''simple docstring'''
lowerCamelCase = """<pad>"""
lowerCamelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 10_11_22 )
def __A ( self ) -> Any:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_11_22 )
@require_torch
def __A ( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
lowerCamelCase = [0, 57, 30_18, 7_03_07, 91, 2]
lowerCamelCase = self.tokenizer(
__SCREAMING_SNAKE_CASE , max_length=len(__SCREAMING_SNAKE_CASE ) , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
lowerCamelCase = batch.input_ids.tolist()[0]
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowerCamelCase = self.get_tokenizer()
lowerCamelCase = self.get_rust_tokenizer()
lowerCamelCase = """I was born in 92000, and this is falsé."""
lowerCamelCase = tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
lowerCamelCase = rust_tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowerCamelCase = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
lowerCamelCase = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowerCamelCase = self.get_rust_tokenizer()
lowerCamelCase = tokenizer.encode(__SCREAMING_SNAKE_CASE )
lowerCamelCase = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@slow
def __A ( self ) -> int:
'''simple docstring'''
lowerCamelCase = {"""input_ids""": [[0, 4_90, 1_43_28, 45_07, 3_54, 47, 4_36_69, 95, 25, 7_81_17, 2_02_15, 1_97_79, 1_90, 22, 4_00, 4, 3_53_43, 8_03_10, 6_03, 86, 2_49_37, 1_05, 3_34_38, 9_47_62, 1_96, 3_96_42, 7, 15, 1_59_33, 1_73, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_05_34, 87, 25, 66, 33_58, 1_96, 5_52_89, 8, 8_29_61, 81, 22_04, 7_52_03, 7, 15, 7_63, 1_29_56, 2_16, 1_78, 1_43_28, 95_95, 13_77, 6_96_93, 7, 4_48, 7_10_21, 1_96, 1_81_06, 14_37, 1_39_74, 1_08, 90_83, 4, 4_93_15, 7, 39, 86, 13_26, 27_93, 4_63_33, 4, 4_48, 1_96, 7_45_88, 7, 4_93_15, 7, 39, 21, 8_22, 3_84_70, 74, 21, 6_67_23, 6_24_80, 8, 2_20_50, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
    # moussaKam/mbarthez is a French model, so we also use French texts.
lowerCamelCase = [
"""Le transformeur est un modèle d\'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=__SCREAMING_SNAKE_CASE , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=__SCREAMING_SNAKE_CASE , )
| 457 |
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class lowerCAmelCase ( __lowerCAmelCase , unittest.TestCase):
__lowercase : int = BarthezTokenizer
__lowercase : Any = BarthezTokenizerFast
__lowercase : Dict = True
__lowercase : Optional[int] = True
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
super().setUp()
__snake_case = BarthezTokenizerFast.from_pretrained('''moussaKam/mbarthez''' )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=__SCREAMING_SNAKE_CASE )
__snake_case = tokenizer
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
__snake_case = '''<pad>'''
__snake_case = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 10_1122 )
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_1122 )
@require_torch
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
__snake_case = [0, 57, 3018, 7_0307, 91, 2]
__snake_case = self.tokenizer(
__SCREAMING_SNAKE_CASE , max_length=len(__SCREAMING_SNAKE_CASE ) , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
__snake_case = batch.input_ids.tolist()[0]
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__snake_case = self.get_tokenizer()
__snake_case = self.get_rust_tokenizer()
__snake_case = '''I was born in 92000, and this is falsé.'''
__snake_case = tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
__snake_case = rust_tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
__snake_case = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = self.get_rust_tokenizer()
__snake_case = tokenizer.encode(__SCREAMING_SNAKE_CASE )
__snake_case = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@slow
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case = {'''input_ids''': [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
    # moussaKam/mbarthez is a French model, so we also use French texts.
__snake_case = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
expected_encoding=__SCREAMING_SNAKE_CASE , model_name='''moussaKam/mbarthez''' , revision='''c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6''' , sequences=__SCREAMING_SNAKE_CASE , )
| 24 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowerCamelCase__ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase__ = IFPipeline
UpperCamelCase__ = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''}
UpperCamelCase__ = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCamelCase__ = PipelineTesterMixin.required_optional_params - {'''latents'''}
def lowerCAmelCase_ ( self : int ):
return self._get_dummy_components()
def lowerCAmelCase_ ( self : List[Any] ,a__ : List[Any] ,a__ : Any=0 ):
if str(__SCREAMING_SNAKE_CASE ).startswith("mps" ):
a__ = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
a__ = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
a__ = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def lowerCAmelCase_ ( self : Dict ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" ,reason="float16 requires CUDA" )
def lowerCAmelCase_ ( self : Any ):
super().test_save_load_floataa(expected_max_diff=1e-1 )
def lowerCAmelCase_ ( self : Tuple ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def lowerCAmelCase_ ( self : Dict ):
self._test_save_load_local()
def lowerCAmelCase_ ( self : List[str] ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 ,)
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() ,reason="XFormers attention is only available with CUDA and `xformers` installed" ,)
def lowerCAmelCase_ ( self : Optional[Any] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@slow
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase_ ( self : int ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self : str ):
a__ = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0" ,variant="fp16" ,torch_dtype=torch.floataa )
a__ = IFSuperResolutionPipeline.from_pretrained(
"DeepFloyd/IF-II-L-v1.0" ,variant="fp16" ,torch_dtype=torch.floataa ,text_encoder=__SCREAMING_SNAKE_CASE ,tokenizer=__SCREAMING_SNAKE_CASE )
        # Pre-compute the text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("cuda" )
a__ , a__ = pipe_a.encode_prompt("anime turtle" ,device="cuda" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
a__ = None
a__ = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
a__ = IFImgaImgPipeline(**pipe_a.components )
a__ = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
a__ = IFInpaintingPipeline(**pipe_a.components )
a__ = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
def lowerCAmelCase_ ( self : int ,a__ : List[Any] ,a__ : Any ,a__ : str ,a__ : Dict ):
_start_torch_memory_measurement()
a__ = torch.Generator(device="cpu" ).manual_seed(0 )
a__ = pipe_a(
prompt_embeds=__SCREAMING_SNAKE_CASE ,negative_prompt_embeds=__SCREAMING_SNAKE_CASE ,num_inference_steps=2 ,generator=__SCREAMING_SNAKE_CASE ,output_type="np" ,)
a__ = output.images[0]
assert image.shape == (64, 64, 3)
a__ = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
a__ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy" )
assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
# pipeline 2
_start_torch_memory_measurement()
a__ = torch.Generator(device="cpu" ).manual_seed(0 )
a__ = floats_tensor((1, 3, 64, 64) ,rng=random.Random(0 ) ).to(__SCREAMING_SNAKE_CASE )
a__ = pipe_a(
prompt_embeds=__SCREAMING_SNAKE_CASE ,negative_prompt_embeds=__SCREAMING_SNAKE_CASE ,image=__SCREAMING_SNAKE_CASE ,generator=__SCREAMING_SNAKE_CASE ,num_inference_steps=2 ,output_type="np" ,)
a__ = output.images[0]
assert image.shape == (2_56, 2_56, 3)
a__ = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
a__ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy" )
assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
def lowerCAmelCase_ ( self : Dict ,a__ : Union[str, Any] ,a__ : str ,a__ : List[str] ,a__ : str ):
_start_torch_memory_measurement()
a__ = floats_tensor((1, 3, 64, 64) ,rng=random.Random(0 ) ).to(__SCREAMING_SNAKE_CASE )
a__ = torch.Generator(device="cpu" ).manual_seed(0 )
a__ = pipe_a(
prompt_embeds=__SCREAMING_SNAKE_CASE ,negative_prompt_embeds=__SCREAMING_SNAKE_CASE ,image=__SCREAMING_SNAKE_CASE ,num_inference_steps=2 ,generator=__SCREAMING_SNAKE_CASE ,output_type="np" ,)
a__ = output.images[0]
assert image.shape == (64, 64, 3)
a__ = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
a__ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy" )
assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
# pipeline 2
_start_torch_memory_measurement()
a__ = torch.Generator(device="cpu" ).manual_seed(0 )
a__ = floats_tensor((1, 3, 2_56, 2_56) ,rng=random.Random(0 ) ).to(__SCREAMING_SNAKE_CASE )
a__ = floats_tensor((1, 3, 64, 64) ,rng=random.Random(0 ) ).to(__SCREAMING_SNAKE_CASE )
a__ = pipe_a(
prompt_embeds=__SCREAMING_SNAKE_CASE ,negative_prompt_embeds=__SCREAMING_SNAKE_CASE ,image=__SCREAMING_SNAKE_CASE ,original_image=__SCREAMING_SNAKE_CASE ,generator=__SCREAMING_SNAKE_CASE ,num_inference_steps=2 ,output_type="np" ,)
a__ = output.images[0]
assert image.shape == (2_56, 2_56, 3)
a__ = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
a__ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy" )
assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
def lowerCAmelCase_ ( self : int ,a__ : List[str] ,a__ : List[str] ,a__ : Optional[int] ,a__ : Optional[Any] ):
_start_torch_memory_measurement()
a__ = floats_tensor((1, 3, 64, 64) ,rng=random.Random(0 ) ).to(__SCREAMING_SNAKE_CASE )
a__ = floats_tensor((1, 3, 64, 64) ,rng=random.Random(1 ) ).to(__SCREAMING_SNAKE_CASE )
a__ = torch.Generator(device="cpu" ).manual_seed(0 )
a__ = pipe_a(
prompt_embeds=__SCREAMING_SNAKE_CASE ,negative_prompt_embeds=__SCREAMING_SNAKE_CASE ,image=__SCREAMING_SNAKE_CASE ,mask_image=__SCREAMING_SNAKE_CASE ,num_inference_steps=2 ,generator=__SCREAMING_SNAKE_CASE ,output_type="np" ,)
a__ = output.images[0]
assert image.shape == (64, 64, 3)
a__ = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
a__ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy" )
assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
# pipeline 2
_start_torch_memory_measurement()
a__ = torch.Generator(device="cpu" ).manual_seed(0 )
a__ = floats_tensor((1, 3, 64, 64) ,rng=random.Random(0 ) ).to(__SCREAMING_SNAKE_CASE )
a__ = floats_tensor((1, 3, 2_56, 2_56) ,rng=random.Random(0 ) ).to(__SCREAMING_SNAKE_CASE )
a__ = floats_tensor((1, 3, 2_56, 2_56) ,rng=random.Random(1 ) ).to(__SCREAMING_SNAKE_CASE )
a__ = pipe_a(
prompt_embeds=__SCREAMING_SNAKE_CASE ,negative_prompt_embeds=__SCREAMING_SNAKE_CASE ,image=__SCREAMING_SNAKE_CASE ,mask_image=__SCREAMING_SNAKE_CASE ,original_image=__SCREAMING_SNAKE_CASE ,generator=__SCREAMING_SNAKE_CASE ,num_inference_steps=2 ,output_type="np" ,)
a__ = output.images[0]
assert image.shape == (2_56, 2_56, 3)
a__ = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
a__ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy" )
assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ():
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 331 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class lowerCAmelCase ( unittest.TestCase):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=99 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=4 , ) -> Any:
'''simple docstring'''
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_attention_mask
__snake_case = use_token_type_ids
__snake_case = use_labels
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = num_choices
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case = None
if self.use_attention_mask:
__snake_case = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case = None
if self.use_token_type_ids:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__snake_case = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case , __snake_case = config_and_inputs
__snake_case = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class lowerCAmelCase ( __lowerCAmelCase , unittest.TestCase):
__lowercase : Tuple = True
__lowercase : Optional[int] = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case = FlaxRoFormerModelTester(self )
@slow
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
__snake_case = model_class_name.from_pretrained('''junnyu/roformer_chinese_small''' , from_pt=__SCREAMING_SNAKE_CASE )
__snake_case = model(np.ones((1, 1) ) )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
@require_flax
class lowerCAmelCase ( unittest.TestCase):
@slow
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = FlaxRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
__snake_case = jnp.array([[0, 1, 2, 3, 4, 5]] )
__snake_case = model(__SCREAMING_SNAKE_CASE )[0]
__snake_case = 5_0000
__snake_case = (1, 6, vocab_size)
self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE )
__snake_case = jnp.array(
[[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
| 24 | 0 |
'''Dynamic-programming check for whether a subset of a list sums to a target.'''


def is_sum_subset(arr: list, required_sum: int) -> bool:
    arr_len = len(arr)
    # subset[i][j] is True when some subset of the first i elements sums to j
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for j in range(1, required_sum + 1):
        subset[0][j] = False
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
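    # Illustrative check (added; the values are arbitrary): 4 + 5 reaches 9,
    # while no subset of the list sums to 30.
    assert is_sum_subset([3, 34, 4, 12, 5, 2], 9) is True
    assert is_sum_subset([3, 34, 4, 12, 5, 2], 30) is False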
| 75 |
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def _UpperCamelCase ()-> int:
'''simple docstring'''
__snake_case = '''https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'''
__snake_case = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ).convert('''RGB''' )
return image
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] )-> List[Any]:
'''simple docstring'''
__snake_case = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
def _UpperCamelCase (_lowerCamelCase : Dict , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[int] )-> Tuple:
'''simple docstring'''
__snake_case = dct.pop(_lowerCamelCase )
__snake_case = val
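# Self-contained illustration (added) of the pop-and-reinsert renaming pattern
# the helper above implements; the key names are sample entries from the rename
# list built earlier.
_demo_state = {"visual_encoder.cls_token": 0}
_demo_state["vision_model.embeddings.class_embedding"] = _demo_state.pop("visual_encoder.cls_token")
assert list(_demo_state) == ["vision_model.embeddings.class_embedding"]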
def _UpperCamelCase (_lowerCamelCase : List[Any] , _lowerCamelCase : Tuple )-> str:
'''simple docstring'''
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
__snake_case = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' )
__snake_case = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
__snake_case = torch.cat((q_bias, torch.zeros_like(_lowerCamelCase , requires_grad=_lowerCamelCase ), v_bias) )
__snake_case = qkv_bias
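# Tiny self-contained illustration (added) of the qkv bias layout assembled
# above: a zero block for the key bias sits between the learned q and v biases.
# The size 4 is an arbitrary choice for the demo.
_q_demo = torch.ones(4)
_v_demo = torch.full((4,), 2.0)
_qkv_demo = torch.cat((_q_demo, torch.zeros_like(_q_demo), _v_demo))
assert _qkv_demo.shape == (12,)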
def _UpperCamelCase (_lowerCamelCase : Tuple , _lowerCamelCase : Tuple )-> Dict:
'''simple docstring'''
__snake_case = 3_64 if '''coco''' in model_name else 2_24
__snake_case = BlipaVisionConfig(image_size=_lowerCamelCase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
__snake_case = OPTConfig.from_pretrained('''facebook/opt-2.7b''' , eos_token_id=_lowerCamelCase ).to_dict()
elif "opt-6.7b" in model_name:
__snake_case = OPTConfig.from_pretrained('''facebook/opt-6.7b''' , eos_token_id=_lowerCamelCase ).to_dict()
elif "t5-xl" in model_name:
__snake_case = TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
__snake_case = TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
__snake_case = BlipaConfig(vision_config=_lowerCamelCase , text_config=_lowerCamelCase )
return config, image_size
@torch.no_grad()
def _UpperCamelCase (_lowerCamelCase : List[Any] , _lowerCamelCase : Union[str, Any]=None , _lowerCamelCase : Any=False )-> Dict:
'''simple docstring'''
__snake_case = (
AutoTokenizer.from_pretrained('''facebook/opt-2.7b''' )
if '''opt''' in model_name
else AutoTokenizer.from_pretrained('''google/flan-t5-xl''' )
)
__snake_case = tokenizer('''\n''' , add_special_tokens=_lowerCamelCase ).input_ids[0]
__snake_case , __snake_case = get_blipa_config(_lowerCamelCase , eos_token_id=_lowerCamelCase )
__snake_case = BlipaForConditionalGeneration(_lowerCamelCase ).eval()
__snake_case = {
'''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''),
'''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''),
'''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''),
'''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''),
'''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''),
'''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''),
'''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''),
}
__snake_case , __snake_case = model_name_to_original[model_name]
# load original model
print('''Loading original model...''' )
__snake_case = '''cuda''' if torch.cuda.is_available() else '''cpu'''
__snake_case , __snake_case , __snake_case = load_model_and_preprocess(
name=_lowerCamelCase , model_type=_lowerCamelCase , is_eval=_lowerCamelCase , device=_lowerCamelCase )
original_model.eval()
print('''Done!''' )
# update state dict keys
__snake_case = original_model.state_dict()
__snake_case = create_rename_keys(_lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
__snake_case = state_dict.pop(_lowerCamelCase )
if key.startswith('''Qformer.bert''' ):
__snake_case = key.replace('''Qformer.bert''' , '''qformer''' )
if "attention.self" in key:
__snake_case = key.replace('''self''' , '''attention''' )
if "opt_proj" in key:
__snake_case = key.replace('''opt_proj''' , '''language_projection''' )
if "t5_proj" in key:
__snake_case = key.replace('''t5_proj''' , '''language_projection''' )
if key.startswith('''opt''' ):
__snake_case = key.replace('''opt''' , '''language''' )
if key.startswith('''t5''' ):
__snake_case = key.replace('''t5''' , '''language''' )
__snake_case = val
# read in qv biases
read_in_q_v_bias(_lowerCamelCase , _lowerCamelCase )
__snake_case , __snake_case = hf_model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
assert len(_lowerCamelCase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
__snake_case = load_demo_image()
__snake_case = vis_processors['''eval'''](_lowerCamelCase ).unsqueeze(0 ).to(_lowerCamelCase )
__snake_case = tokenizer(['''\n'''] , return_tensors='''pt''' ).input_ids.to(_lowerCamelCase )
# create processor
__snake_case = BlipImageProcessor(
size={'''height''': image_size, '''width''': image_size} , image_mean=_lowerCamelCase , image_std=_lowerCamelCase )
__snake_case = BlipaProcessor(image_processor=_lowerCamelCase , tokenizer=_lowerCamelCase )
__snake_case = processor(images=_lowerCamelCase , return_tensors='''pt''' ).pixel_values.to(_lowerCamelCase )
# make sure processor creates exact same pixel values
assert torch.allclose(_lowerCamelCase , _lowerCamelCase )
original_model.to(_lowerCamelCase )
hf_model.to(_lowerCamelCase )
with torch.no_grad():
if "opt" in model_name:
__snake_case = original_model({'''image''': original_pixel_values, '''text_input''': ['''''']} ).logits
__snake_case = hf_model(_lowerCamelCase , _lowerCamelCase ).logits
else:
__snake_case = original_model(
{'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']} ).logits
__snake_case = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
__snake_case = hf_model(_lowerCamelCase , _lowerCamelCase , labels=_lowerCamelCase ).logits
assert original_logits.shape == logits.shape
print('''First values of original logits:''' , original_logits[0, :3, :3] )
print('''First values of HF logits:''' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
__snake_case = torch.tensor(
[[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=_lowerCamelCase )
assert torch.allclose(logits[0, :3, :3] , _lowerCamelCase , atol=1E-4 )
elif model_name == "blip2-flan-t5-xl-coco":
__snake_case = torch.tensor(
[[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=_lowerCamelCase )
else:
# cast to same type
__snake_case = logits.dtype
assert torch.allclose(original_logits.to(_lowerCamelCase ) , _lowerCamelCase , atol=1E-2 )
print('''Looks ok!''' )
print('''Generating a caption...''' )
__snake_case = ''''''
__snake_case = tokenizer(_lowerCamelCase , return_tensors='''pt''' ).input_ids.to(_lowerCamelCase )
__snake_case = original_model.generate({'''image''': original_pixel_values} )
__snake_case = hf_model.generate(
_lowerCamelCase , _lowerCamelCase , do_sample=_lowerCamelCase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('''Original generation:''' , _lowerCamelCase )
__snake_case = input_ids.shape[1]
__snake_case = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_lowerCamelCase )
__snake_case = [text.strip() for text in output_text]
print('''HF generation:''' , _lowerCamelCase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(_lowerCamelCase )
hf_model.save_pretrained(_lowerCamelCase )
if push_to_hub:
processor.push_to_hub(f'''nielsr/{model_name}''' )
hf_model.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
    args = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
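    # Example invocation (added; the script filename and paths are illustrative
    # placeholders, not from the original source):
    #   python convert_blip2_checkpoint.py --model_name blip2-opt-2.7b \
    #       --pytorch_dump_folder_path ./blip2-opt-2.7b --push_to_hub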
| 24 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any]=7 , UpperCAmelCase_ : str=3 , UpperCAmelCase_ : Any=18 , UpperCAmelCase_ : int=30 , UpperCAmelCase_ : Tuple=400 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : int=None , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Tuple=True , ):
SCREAMING_SNAKE_CASE : Optional[int] = size if size is not None else {"shortest_edge": 20}
SCREAMING_SNAKE_CASE : List[Any] = crop_size if crop_size is not None else {"height": 18, "width": 18}
SCREAMING_SNAKE_CASE : List[Any] = parent
SCREAMING_SNAKE_CASE : List[Any] = batch_size
SCREAMING_SNAKE_CASE : Any = num_channels
SCREAMING_SNAKE_CASE : str = image_size
SCREAMING_SNAKE_CASE : Tuple = min_resolution
SCREAMING_SNAKE_CASE : Optional[int] = max_resolution
SCREAMING_SNAKE_CASE : List[Any] = do_resize
SCREAMING_SNAKE_CASE : str = size
SCREAMING_SNAKE_CASE : Dict = do_center_crop
SCREAMING_SNAKE_CASE : Optional[Any] = crop_size
SCREAMING_SNAKE_CASE : List[str] = do_flip_channel_order
def _A ( self : List[str] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( __lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase_ : Union[str, Any] = MobileViTImageProcessor if is_vision_available() else None
def _A ( self : str ):
SCREAMING_SNAKE_CASE : str = MobileViTImageProcessingTester(self )
@property
def _A ( self : Union[str, Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def _A ( self : List[str] ):
SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , "do_resize" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , "size" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , "do_center_crop" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , "center_crop" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , "do_flip_channel_order" ) )
def _A ( self : int ):
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 20} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def _A ( self : Optional[Any] ):
pass
def _A ( self : int ):
SCREAMING_SNAKE_CASE : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
SCREAMING_SNAKE_CASE : str = image_processing(__SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _A ( self : str ):
SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(__SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _A ( self : List[str] ):
SCREAMING_SNAKE_CASE : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
SCREAMING_SNAKE_CASE : Any = image_processing(__SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
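# Self-contained illustration (added) of the `do_flip_channel_order` behavior
# the processor above exposes: RGB -> BGR by reversing the channel axis.
# The toy array is an assumption for the demo.
_rgb = np.zeros((3, 2, 2), dtype=np.float32)
_rgb[0] = 1.0                      # mark the red plane
_bgr = _rgb[::-1, :, :]            # channel-order flip
assert _bgr[2].sum() == 4.0 and _bgr[0].sum() == 0.0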
| 62 |
'''simple docstring'''
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
UpperCAmelCase_ : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
def _UpperCamelCase (_lowerCamelCase : Union[List, PIL.Image.Image, torch.Tensor] )-> Optional[Any]:
'''simple docstring'''
warnings.warn(
'''The preprocess method is deprecated and will be removed in a future version. Please'''
''' use VaeImageProcessor.preprocess instead''' , _lowerCamelCase , )
if isinstance(_lowerCamelCase , torch.Tensor ):
return image
elif isinstance(_lowerCamelCase , PIL.Image.Image ):
__snake_case = [image]
if isinstance(image[0] , PIL.Image.Image ):
__snake_case , __snake_case = image[0].size
__snake_case , __snake_case = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
__snake_case = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
__snake_case = np.concatenate(_lowerCamelCase , axis=0 )
__snake_case = np.array(_lowerCamelCase ).astype(np.floataa ) / 255.0
__snake_case = image.transpose(0 , 3 , 1 , 2 )
__snake_case = 2.0 * image - 1.0
__snake_case = torch.from_numpy(_lowerCamelCase )
elif isinstance(image[0] , torch.Tensor ):
__snake_case = torch.cat(_lowerCamelCase , dim=0 )
return image
def _UpperCamelCase (_lowerCamelCase : Union[List, PIL.Image.Image, torch.Tensor] )-> Optional[Any]:
'''simple docstring'''
if isinstance(_lowerCamelCase , torch.Tensor ):
return mask
elif isinstance(_lowerCamelCase , PIL.Image.Image ):
__snake_case = [mask]
if isinstance(mask[0] , PIL.Image.Image ):
__snake_case , __snake_case = mask[0].size
__snake_case , __snake_case = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
__snake_case = [np.array(m.convert('''L''' ).resize((w, h) , resample=PIL_INTERPOLATION['''nearest'''] ) )[None, :] for m in mask]
__snake_case = np.concatenate(_lowerCamelCase , axis=0 )
__snake_case = mask.astype(np.floataa ) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
__snake_case = torch.from_numpy(_lowerCamelCase )
elif isinstance(mask[0] , torch.Tensor ):
__snake_case = torch.cat(_lowerCamelCase , dim=0 )
return mask
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : UNetaDModel
__lowercase : RePaintScheduler
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
super().__init__()
self.register_modules(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
@torch.no_grad()
def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 250 , __SCREAMING_SNAKE_CASE = 0.0 , __SCREAMING_SNAKE_CASE = 10 , __SCREAMING_SNAKE_CASE = 10 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "pil" , __SCREAMING_SNAKE_CASE = True , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
__snake_case = image
__snake_case = _preprocess_image(__SCREAMING_SNAKE_CASE )
__snake_case = original_image.to(device=self.device , dtype=self.unet.dtype )
__snake_case = _preprocess_mask(__SCREAMING_SNAKE_CASE )
__snake_case = mask_image.to(device=self.device , dtype=self.unet.dtype )
__snake_case = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and len(__SCREAMING_SNAKE_CASE ) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(__SCREAMING_SNAKE_CASE )}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
__snake_case = original_image.shape
__snake_case = randn_tensor(__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.device )
__snake_case = eta
__snake_case = self.scheduler.timesteps[0] + 1
__snake_case = generator[0] if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
__snake_case = self.unet(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).sample
# compute previous image: x_t -> x_t-1
__snake_case = self.scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
__snake_case = self.scheduler.undo_step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = t
__snake_case = (image / 2 + 0.5).clamp(0 , 1 )
__snake_case = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__snake_case = self.numpy_to_pil(__SCREAMING_SNAKE_CASE )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__SCREAMING_SNAKE_CASE )
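# Numeric sanity check (added) mirroring the mask binarization in
# _preprocess_mask above: values below 0.5 become 0, values at or above 0.5
# become 1 (toy array for illustration).
_m = np.array([0.2, 0.5, 0.9], dtype=np.float32)
_m[_m < 0.5] = 0
_m[_m >= 0.5] = 1
assert _m.tolist() == [0.0, 1.0, 1.0]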
| 24 | 0 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _A ( __lowerCAmelCase ):
'''simple docstring'''
def __init__( self ,*SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_=None ,SCREAMING_SNAKE_CASE_=None ,**SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
super().__init__(*__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )
snake_case : List[Any] = eval_examples
snake_case : Optional[int] = post_process_function
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_=None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = "eval" ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
snake_case : Dict = gen_kwargs.copy()
snake_case : Optional[Any] = (
gen_kwargs["""max_length"""] if gen_kwargs.get("""max_length""" ) is not None else self.args.generation_max_length
)
snake_case : Union[str, Any] = (
gen_kwargs["""num_beams"""] if gen_kwargs.get("""num_beams""" ) is not None else self.args.generation_num_beams
)
snake_case : Union[str, Any] = gen_kwargs
snake_case : Optional[int] = self.eval_dataset if eval_dataset is None else eval_dataset
snake_case : Any = self.get_eval_dataloader(__SCREAMING_SNAKE_CASE )
snake_case : Union[str, Any] = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
snake_case : str = self.compute_metrics
snake_case : Tuple = None
snake_case : Optional[Any] = time.time()
snake_case : List[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
snake_case : Tuple = eval_loop(
__SCREAMING_SNAKE_CASE ,description="""Evaluation""" ,prediction_loss_only=True if compute_metrics is None else None ,ignore_keys=__SCREAMING_SNAKE_CASE ,metric_key_prefix=__SCREAMING_SNAKE_CASE ,)
finally:
snake_case : List[str] = compute_metrics
snake_case : Tuple = self.args.eval_batch_size * self.args.world_size
if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,num_samples=output.num_samples ,num_steps=math.ceil(output.num_samples / total_batch_size ) ,) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
snake_case : Optional[int] = self.post_process_function(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
snake_case : Optional[Any] = self.compute_metrics(__SCREAMING_SNAKE_CASE )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"""{metric_key_prefix}_""" ):
snake_case : Union[str, Any] = metrics.pop(__SCREAMING_SNAKE_CASE )
metrics.update(output.metrics )
else:
snake_case : Optional[Any] = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(__SCREAMING_SNAKE_CASE )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
snake_case : Optional[int] = self.callback_handler.on_evaluate(self.args ,self.state ,self.control ,__SCREAMING_SNAKE_CASE )
return metrics
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_=None ,SCREAMING_SNAKE_CASE_ = "test" ,**SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : List[Any] = gen_kwargs.copy()
snake_case : List[Any] = self.get_test_dataloader(__SCREAMING_SNAKE_CASE )
# Temporarily disable metric computation, we will do it in the loop here.
snake_case : Optional[Any] = self.compute_metrics
snake_case : Optional[Any] = None
snake_case : str = time.time()
snake_case : List[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
snake_case : str = eval_loop(
__SCREAMING_SNAKE_CASE ,description="""Prediction""" ,prediction_loss_only=True if compute_metrics is None else None ,ignore_keys=__SCREAMING_SNAKE_CASE ,metric_key_prefix=__SCREAMING_SNAKE_CASE ,)
finally:
snake_case : Optional[int] = compute_metrics
snake_case : Union[str, Any] = self.args.eval_batch_size * self.args.world_size
if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,num_samples=output.num_samples ,num_steps=math.ceil(output.num_samples / total_batch_size ) ,) )
if self.post_process_function is None or self.compute_metrics is None:
return output
snake_case : Dict = self.post_process_function(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,"""predict""" )
snake_case : List[Any] = self.compute_metrics(__SCREAMING_SNAKE_CASE )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"""{metric_key_prefix}_""" ):
snake_case : int = metrics.pop(__SCREAMING_SNAKE_CASE )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions ,label_ids=predictions.label_ids ,metrics=__SCREAMING_SNAKE_CASE )
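# Hedged usage sketch (added; the class and argument names below are
# assumptions based on the upstream question-answering examples, not defined
# in this file):
#
#   trainer = QuestionAnsweringSeq2SeqTrainer(
#       model=model, args=training_args,
#       train_dataset=train_dataset, eval_dataset=eval_dataset,
#       eval_examples=eval_examples, post_process_function=post_processing_function,
#   )
#   metrics = trainer.evaluate(max_length=128, num_beams=4, metric_key_prefix="eval")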
| 36 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
UpperCAmelCase_ : str = logging.get_logger(__name__)
@add_end_docstrings(__lowerCAmelCase)
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
return super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , **__SCREAMING_SNAKE_CASE ) -> Tuple:
'''simple docstring'''
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
return preprocess_params, {}, {}
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="This is a photo of {}." ) -> Optional[Any]:
'''simple docstring'''
__snake_case = load_image(__SCREAMING_SNAKE_CASE )
__snake_case = self.image_processor(images=[image] , return_tensors=self.framework )
__snake_case = candidate_labels
__snake_case = [hypothesis_template.format(__SCREAMING_SNAKE_CASE ) for x in candidate_labels]
__snake_case = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=self.framework , padding=__SCREAMING_SNAKE_CASE )
__snake_case = [text_inputs]
return inputs
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
__snake_case = model_inputs.pop('''candidate_labels''' )
__snake_case = model_inputs.pop('''text_inputs''' )
if isinstance(text_inputs[0] , __SCREAMING_SNAKE_CASE ):
__snake_case = text_inputs[0]
else:
# Batching case.
__snake_case = text_inputs[0][0]
__snake_case = self.model(**__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
__snake_case = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_image,
}
return model_outputs
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
__snake_case = model_outputs.pop('''candidate_labels''' )
__snake_case = model_outputs['''logits'''][0]
if self.framework == "pt":
__snake_case = logits.softmax(dim=-1 ).squeeze(-1 )
__snake_case = probs.tolist()
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__snake_case = [scores]
elif self.framework == "tf":
__snake_case = stable_softmax(__SCREAMING_SNAKE_CASE , axis=-1 )
__snake_case = probs.numpy().tolist()
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
return result
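# Hedged usage sketch (added; the checkpoint name is an assumption — any
# CLIP-style model works with this pipeline task):
#
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-image-classification",
#                         model="openai/clip-vit-base-patch32")
#   classifier("cat.png", candidate_labels=["cat", "dog"],
#              hypothesis_template="This is a photo of {}.")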
| 24 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__a = {
'''configuration_time_series_transformer''': [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TimeSeriesTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimeSeriesTransformerForPrediction''',
'''TimeSeriesTransformerModel''',
'''TimeSeriesTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 97 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCAmelCase_ : List[str] = {
'''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : int = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
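# Minimal self-contained sketch (added) of the lazy-import pattern both
# __init__ snippets above rely on: attribute access triggers the real import.
# ("math"/"sqrt" stand in for the real submodules of the demo.)
import importlib
import types

class _DemoLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for module, names in self._import_structure.items():
            if attr in names:
                value = getattr(importlib.import_module(module), attr)
                setattr(self, attr, value)  # cache so __getattr__ runs only once
                return value
        raise AttributeError(attr)

_demo_module = _DemoLazyModule("demo", {"math": ["sqrt"]})
assert _demo_module.sqrt(9) == 3.0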
| 24 | 0 |
import os
import jsonlines
import numpy as np
from tqdm import tqdm
DOC_STRIDE = 2_048
MAX_LENGTH = 4_096
SEED = 42
PROCESS_TRAIN = os.environ.pop("""PROCESS_TRAIN""", """false""")
CATEGORY_MAPPING = {"""null""": 0, """short""": 1, """long""": 2, """yes""": 3, """no""": 4}
def _get_single_answer(example):
    """Pick a single gold answer out of the (possibly multiple) NQ annotations."""

    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"]) > 0:
                break
        return a

    answer = {"id": example["id"]}
    annotation = example["annotations"]
    yes_no_answer = annotation["yes_no_answer"]
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"]
        answer["start_token"] = answer["end_token"] = []
        answer["start_byte"] = answer["end_byte"] = []
        answer["text"] = ["<cls>"]
    else:
        answer["category"] = ["short"]
        out = choose_first(annotation["short_answers"])
        if len(out["start_token"]) == 0:
            # answer will be long if short is not available
            answer["category"] = ["long"]
            out = choose_first(annotation["long_answer"], is_long_answer=True)
            out["text"] = []
        answer.update(out)
    # disregard some samples
    if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
        answer["remove_it"] = True
    else:
        answer["remove_it"] = False
    cols = ["start_token", "end_token", "start_byte", "end_byte", "text"]
    if not all(isinstance(answer[k], list) for k in cols):
        raise ValueError("Issue in ID", example["id"])
    return answer
def get_context_and_ans(example, assertion=False):
    """Rebuild the context without HTML tokens and re-index the answer span."""
lowercase_ : int = _get_single_answer(_lowerCamelCase )
# bytes are of no use
del answer["start_byte"]
del answer["end_byte"]
# handle yes_no answers explicitly
if answer["category"][0] in ["yes", "no"]: # category is list with one element
lowercase_ : str = example["""document"""]["""tokens"""]
lowercase_ : List[str] = []
for i in range(len(doc["""token"""] ) ):
if not doc["is_html"][i]:
context.append(doc["""token"""][i] )
return {
"context": " ".join(_lowerCamelCase ),
"answer": {
"start_token": -100, # ignore index in cross-entropy
"end_token": -100, # ignore index in cross-entropy
"category": answer["category"],
"span": answer["category"], # extra
},
}
# later, help in removing all no answers
if answer["start_token"] == [-1]:
return {
"context": "None",
"answer": {
"start_token": -1,
"end_token": -1,
"category": "null",
"span": "None", # extra
},
}
# handling normal samples
lowercase_ : Union[str, Any] = ["""start_token""", """end_token"""]
answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. [10] == 10
lowercase_ : Any = example["""document"""]["""tokens"""]
lowercase_ : int = answer["""start_token"""]
lowercase_ : Optional[int] = answer["""end_token"""]
lowercase_ : int = []
for i in range(len(doc["""token"""] ) ):
if not doc["is_html"][i]:
context.append(doc["""token"""][i] )
else:
if answer["start_token"] > i:
start_token -= 1
if answer["end_token"] > i:
end_token -= 1
lowercase_ : Dict = """ """.join(context[start_token:end_token] )
# checking above code
if assertion:
lowercase_ : Any = doc["""is_html"""][answer["""start_token"""] : answer["""end_token"""]]
lowercase_ : List[Any] = doc["""token"""][answer["""start_token"""] : answer["""end_token"""]]
lowercase_ : str = """ """.join([old[i] for i in range(len(_lowerCamelCase ) ) if not is_html[i]] )
if new != old:
print("""ID:""" , example["""id"""] )
print("""New:""" , _lowerCamelCase , end="""\n""" )
print("""Old:""" , _lowerCamelCase , end="""\n\n""" )
return {
"context": " ".join(_lowerCamelCase ),
"answer": {
"start_token": start_token,
"end_token": end_token - 1, # this makes it inclusive
"category": answer["category"], # either long or short
"span": new, # extra
},
}
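# Self-contained sketch (added) of the span adjustment performed above:
# dropping HTML tokens shifts any answer span that starts after them
# (toy tokens and indices are assumptions for the demo).
_toy_tokens = ["<p>", "hello", "world", "</p>"]
_toy_is_html = [True, False, False, True]
_s0, _e0 = 1, 3  # original (pre-shift) answer span over _toy_tokens
_s, _e = _s0, _e0
_ctx = []
for _i in range(len(_toy_tokens)):
    if not _toy_is_html[_i]:
        _ctx.append(_toy_tokens[_i])
    else:
        if _s0 > _i:
            _s -= 1
        if _e0 > _i:
            _e -= 1
assert " ".join(_ctx[_s:_e]) == "hello world"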
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
    """Tokenize question + context and split into overlapping max_length windows."""
lowercase_ : Tuple = get_context_and_ans(_lowerCamelCase , assertion=_lowerCamelCase )
lowercase_ : int = out["""answer"""]
# later, removing these samples
if answer["start_token"] == -1:
return {
"example_id": example["id"],
"input_ids": [[-1]],
"labels": {
"start_token": [-1],
"end_token": [-1],
"category": ["null"],
},
}
lowercase_ : int = tokenizer(example["""question"""]["""text"""] , out["""context"""] ).input_ids
lowercase_ : str = input_ids.index(tokenizer.sep_token_id ) + 1
# return yes/no
if answer["category"][0] in ["yes", "no"]: # category is list with one element
lowercase_ : Any = []
lowercase_ : Optional[int] = []
lowercase_ : int = input_ids[:q_len]
lowercase_ : Tuple = range(_lowerCamelCase , len(_lowerCamelCase ) , max_length - doc_stride )
for i in doc_start_indices:
lowercase_ : Union[str, Any] = i + max_length - q_len
lowercase_ : str = input_ids[i:end_index]
inputs.append(q_indices + slice )
category.append(answer["""category"""][0] )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": [-100] * len(_lowerCamelCase ),
"end_token": [-100] * len(_lowerCamelCase ),
"category": category,
},
}
lowercase_ : str = out["""context"""].split()
lowercase_ : Dict = splitted_context[answer["""end_token"""]]
lowercase_ : Union[str, Any] = len(
tokenizer(
""" """.join(splitted_context[: answer["""start_token"""]] ) , add_special_tokens=_lowerCamelCase , ).input_ids )
lowercase_ : Union[str, Any] = len(
tokenizer(""" """.join(splitted_context[: answer["""end_token"""]] ) , add_special_tokens=_lowerCamelCase ).input_ids )
answer["start_token"] += q_len
answer["end_token"] += q_len
# fixing end token
lowercase_ : int = len(tokenizer(_lowerCamelCase , add_special_tokens=_lowerCamelCase ).input_ids )
if num_sub_tokens > 1:
answer["end_token"] += num_sub_tokens - 1
lowercase_ : Union[str, Any] = input_ids[answer["""start_token"""] : answer["""end_token"""] + 1] # right & left are inclusive
lowercase_ : Dict = answer["""start_token"""]
lowercase_ : Any = answer["""end_token"""]
if assertion:
lowercase_ : str = tokenizer.decode(_lowerCamelCase )
if answer["span"] != new:
print("""ISSUE IN TOKENIZATION""" )
print("""OLD:""" , answer["""span"""] )
print("""NEW:""" , _lowerCamelCase , end="""\n\n""" )
if len(_lowerCamelCase ) <= max_length:
return {
"example_id": example["id"],
"input_ids": [input_ids],
"labels": {
"start_token": [answer["start_token"]],
"end_token": [answer["end_token"]],
"category": answer["category"],
},
}
lowercase_ : Dict = input_ids[:q_len]
lowercase_ : List[Any] = range(_lowerCamelCase , len(_lowerCamelCase ) , max_length - doc_stride )
lowercase_ : Dict = []
lowercase_ : Optional[Any] = []
lowercase_ : List[Any] = []
lowercase_ : Tuple = [] # null, yes, no, long, short
for i in doc_start_indices:
lowercase_ : Any = i + max_length - q_len
lowercase_ : Any = input_ids[i:end_index]
inputs.append(q_indices + slice )
assert len(inputs[-1] ) <= max_length, "Issue in truncating length"
if start_token >= i and end_token <= end_index - 1:
lowercase_ : Tuple = start_token - i + q_len
lowercase_ : List[str] = end_token - i + q_len
answers_category.append(answer["""category"""][0] ) # ["short"] -> "short"
else:
lowercase_ : str = -100
lowercase_ : Union[str, Any] = -100
answers_category.append("""null""" )
lowercase_ : str = inputs[-1][start_token : end_token + 1]
answers_start_token.append(_lowerCamelCase )
answers_end_token.append(_lowerCamelCase )
if assertion:
if new != old and new != [tokenizer.cls_token_id]:
print("""ISSUE in strided for ID:""" , example["""id"""] )
print("""New:""" , tokenizer.decode(_lowerCamelCase ) )
print("""Old:""" , tokenizer.decode(_lowerCamelCase ) , end="""\n\n""" )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": answers_start_token,
"end_token": answers_end_token,
"category": answers_category,
},
}
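# Toy illustration (added) of the striding above: overlapping windows of
# length max_length - q_len, stepping by max_length - doc_stride (all numbers
# here are made up for the demo).
_toy_ids = list(range(10))
_q_len, _max_len, _stride = 2, 6, 4
_windows = [
    _toy_ids[_i : _i + _max_len - _q_len]
    for _i in range(_q_len, len(_toy_ids), _max_len - _stride)
]
assert _windows[0] == [2, 3, 4, 5] and _windows[1] == [4, 5, 6, 7]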
def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    """Dataset.map entry point wrapping get_strided_contexts_and_ans."""
    example = get_strided_contexts_and_ans(
        example, tokenizer, doc_stride=doc_stride, max_length=max_length, assertion=assertion,
    )
    return example
def save_to_disk(hf_data, file_name):
    """Append the processed samples to a jsonlines file."""
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
for ids, start, end, cat in zip(
example["""input_ids"""] , labels["""start_token"""] , labels["""end_token"""] , labels["""category"""] , ):
if start == -1 and end == -1:
continue # leave waste samples with no answer
if cat == "null" and np.random.rand() < 0.6:
                    continue  # drop roughly 60% of the "null" samples
writer.write(
{
"""input_ids""": ids,
"""start_token""": start,
"""end_token""": end,
"""category""": CATEGORY_MAPPING[cat],
} )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
    data = load_dataset("""natural_questions""")
    tokenizer = BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""")
    data = data["train" if PROCESS_TRAIN == "true" else "validation"]
    fn_kwargs = {
        "tokenizer": tokenizer,
        "doc_stride": DOC_STRIDE,
        "max_length": MAX_LENGTH,
        "assertion": False,
    }
    data = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    data = data.remove_columns(["""annotations""", """document""", """id""", """question"""])
    print(data)
    np.random.seed(SEED)
    cache_file_name = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
    save_to_disk(data, file_name=cache_file_name)
| 458 |
'''Three ways to sum the digits of an integer, with a small benchmark.'''
def sum_of_digits(n: int) -> int:
    '''Iteratively sum the decimal digits of ``n``.'''
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res
def sum_of_digits_recursion(n: int) -> int:
    '''Recursively sum the decimal digits of ``n``.'''
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits(n // 10)
def sum_of_digits_compact(n: int) -> int:
    '''Sum the decimal digits of ``n`` via a string round-trip.'''
    return sum(int(c) for c in str(abs(n)))
def benchmark() -> None:
    '''Time the three implementations on a few large inputs.'''
    from collections.abc import Callable
    from timeit import timeit
    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f'''{func.__name__}({value})'''
        timing = timeit(f'''__main__.{call}''', setup='''import __main__''')
        print(f'''{call:56} = {func(value)} -- {timing:.4f} seconds''')
    for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 24 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
a = logging.get_logger(__name__)
a = {
'''Helsinki-NLP/opus-mt-en-de''': '''https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json''',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : int = '''marian'''
UpperCAmelCase : Any = ['''past_key_values''']
UpperCAmelCase : List[Any] = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : Union[str, Any] , _UpperCAmelCase : List[Any]=58_101 , _UpperCAmelCase : int=None , _UpperCAmelCase : List[Any]=1_024 , _UpperCAmelCase : List[Any]=12 , _UpperCAmelCase : Optional[int]=4_096 , _UpperCAmelCase : List[Any]=16 , _UpperCAmelCase : int=12 , _UpperCAmelCase : Dict=4_096 , _UpperCAmelCase : List[Any]=16 , _UpperCAmelCase : Any=0.0 , _UpperCAmelCase : List[str]=0.0 , _UpperCAmelCase : Dict=True , _UpperCAmelCase : str=True , _UpperCAmelCase : List[Any]="gelu" , _UpperCAmelCase : int=1_024 , _UpperCAmelCase : Optional[Any]=0.1 , _UpperCAmelCase : Union[str, Any]=0.0 , _UpperCAmelCase : int=0.0 , _UpperCAmelCase : str=0.02 , _UpperCAmelCase : Optional[int]=58_100 , _UpperCAmelCase : List[str]=False , _UpperCAmelCase : List[str]=58_100 , _UpperCAmelCase : Tuple=0 , _UpperCAmelCase : Optional[Any]=0 , _UpperCAmelCase : Any=True , **_UpperCAmelCase : Dict , ):
_A = vocab_size
_A = decoder_vocab_size or vocab_size
_A = max_position_embeddings
_A = d_model
_A = encoder_ffn_dim
_A = encoder_layers
_A = encoder_attention_heads
_A = decoder_ffn_dim
_A = decoder_layers
_A = decoder_attention_heads
_A = dropout
_A = attention_dropout
_A = activation_dropout
_A = activation_function
_A = init_std
_A = encoder_layerdrop
_A = decoder_layerdrop
_A = use_cache
_A = encoder_layers
_A = scale_embedding # scale factor will be sqrt(d_model) if True
_A = share_encoder_decoder_embeddings
super().__init__(
pad_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , is_encoder_decoder=__SCREAMING_SNAKE_CASE , decoder_start_token_id=__SCREAMING_SNAKE_CASE , forced_eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def lowerCAmelCase_ ( self : Any ):
if self.task in ["default", "seq2seq-lm"]:
_A = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
_A = {0: 'batch'}
_A = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
_A = {0: 'batch', 1: 'decoder_sequence'}
_A = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(__SCREAMING_SNAKE_CASE , direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
_A = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
_A , _A = self.num_layers
for i in range(__SCREAMING_SNAKE_CASE ):
_A = {0: 'batch', 2: 'past_sequence + sequence'}
_A = {0: 'batch', 2: 'past_sequence + sequence'}
else:
_A = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def lowerCAmelCase_ ( self : Any ):
if self.task in ["default", "seq2seq-lm"]:
_A = super().outputs
else:
_A = super(__SCREAMING_SNAKE_CASE , self ).outputs
if self.use_past:
_A , _A = self.num_layers
for i in range(__SCREAMING_SNAKE_CASE ):
_A = {0: 'batch', 2: 'past_sequence + sequence'}
_A = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[int] = -1 , _UpperCAmelCase : str = -1 , _UpperCAmelCase : Optional[Any] = False , _UpperCAmelCase : str = None , ):
_A = self._generate_dummy_inputs_for_encoder_and_decoder(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Generate decoder inputs
_A = seq_length if not self.use_past else 1
_A = self._generate_dummy_inputs_for_encoder_and_decoder(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
_A = {F'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
_A = dict(**__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_A , _A = common_inputs['input_ids'].shape
_A = common_inputs['decoder_input_ids'].shape[1]
_A , _A = self.num_attention_heads
_A = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_A = decoder_seq_length + 3
_A = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_A = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )] , dim=1 )
_A = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
_A , _A = self.num_layers
_A = min(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
_A = max(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) - min_num_layers
_A = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(__SCREAMING_SNAKE_CASE ):
common_inputs["past_key_values"].append(
(
torch.zeros(__SCREAMING_SNAKE_CASE ),
torch.zeros(__SCREAMING_SNAKE_CASE ),
torch.zeros(__SCREAMING_SNAKE_CASE ),
torch.zeros(__SCREAMING_SNAKE_CASE ),
) )
# TODO: test this.
_A = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
common_inputs["past_key_values"].append((torch.zeros(__SCREAMING_SNAKE_CASE ), torch.zeros(__SCREAMING_SNAKE_CASE )) )
return common_inputs
def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] = -1 , _UpperCAmelCase : Union[str, Any] = -1 , _UpperCAmelCase : Tuple = False , _UpperCAmelCase : Dict = None , ):
_A = self._generate_dummy_inputs_for_encoder_and_decoder(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_A , _A = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
_A = seqlen + 2
_A , _A = self.num_layers
_A , _A = self.num_attention_heads
_A = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_A = common_inputs['attention_mask'].dtype
_A = torch.cat(
[common_inputs['attention_mask'], torch.ones(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE )] , dim=1 )
_A = [
(torch.zeros(__SCREAMING_SNAKE_CASE ), torch.zeros(__SCREAMING_SNAKE_CASE )) for _ in range(__SCREAMING_SNAKE_CASE )
]
return common_inputs
def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] = -1 , _UpperCAmelCase : List[str] = -1 , _UpperCAmelCase : Optional[Any] = False , _UpperCAmelCase : List[str] = None , ):
_A = compute_effective_axis_dimension(
__SCREAMING_SNAKE_CASE , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_A = tokenizer.num_special_tokens_to_add(__SCREAMING_SNAKE_CASE )
_A = compute_effective_axis_dimension(
__SCREAMING_SNAKE_CASE , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__SCREAMING_SNAKE_CASE )
# Generate dummy inputs according to compute batch and sequence
_A = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
_A = dict(tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE ) )
return common_inputs
def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int = -1 , _UpperCAmelCase : Union[str, Any] = -1 , _UpperCAmelCase : Dict = False , _UpperCAmelCase : Optional[Any] = None , ):
if self.task in ["default", "seq2seq-lm"]:
_A = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , seq_length=__SCREAMING_SNAKE_CASE , is_pair=__SCREAMING_SNAKE_CASE , framework=__SCREAMING_SNAKE_CASE )
else:
_A = self._generate_dummy_inputs_for_causal_lm(
__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , seq_length=__SCREAMING_SNAKE_CASE , is_pair=__SCREAMING_SNAKE_CASE , framework=__SCREAMING_SNAKE_CASE )
return common_inputs
def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int] ):
if self.task in ["default", "seq2seq-lm"]:
_A = super()._flatten_past_key_values_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else:
_A = super(__SCREAMING_SNAKE_CASE , self )._flatten_past_key_values_(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
return 1E-4
| 7 |
'''simple docstring'''
def infix_2_postfix(infix: str) -> str:
    '''simple docstring'''
    stack = []
    post_fix = []
    priority = {
        '''^''': 3,
        '''*''': 2,
        '''/''': 2,
        '''%''': 2,
        '''+''': 1,
        '''-''': 1,
    } # Priority of each operator
    print_width = len(infix ) if (len(infix ) > 7) else 7
    # Print table header for output
    print(
        '''Symbol'''.center(8 ) , '''Stack'''.center(print_width ) , '''Postfix'''.center(print_width ) , sep=''' | ''' , )
    print('''-''' * (print_width * 3 + 7) )
    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x ) # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x ) # if x is "(" push to Stack
        elif x == ")": # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack ) == 0:
                stack.append(x ) # If stack is empty, push x to stack
            else: # while priority of x is not > priority of element in the stack
                # also stop at "(", which deliberately has no entry in the priority dict
                while len(stack ) > 0 and stack[-1] != "(" and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop() ) # pop stack & add to Postfix
                stack.append(x ) # push x to stack
        print(
            x.center(8 ) , (''''''.join(stack )).ljust(print_width ) , (''''''.join(post_fix )).ljust(print_width ) , sep=''' | ''' , ) # Output in tabular format
    while len(stack ) > 0: # while stack is not empty
        post_fix.append(stack.pop() ) # pop stack & add to Postfix
        print(
            ''' '''.center(8 ) , (''''''.join(stack )).ljust(print_width ) , (''''''.join(post_fix )).ljust(print_width ) , sep=''' | ''' , ) # Output in tabular format
    return "".join(post_fix ) # return Postfix as str
def infix_2_prefix(infix: str) -> str:
    '''simple docstring'''
    infix = list(infix[::-1] ) # reverse the infix equation
    for i in range(len(infix ) ):
        if infix[i] == "(":
            infix[i] = ''')''' # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = '''(''' # change ")" to "("
    return (infix_2_postfix(''''''.join(infix ) ))[
        ::-1
    ] # call infix_2_postfix on Infix, return reverse of Postfix
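# Worked example (with the restored names above):
#   infix_2_postfix("a+b*c") -> "abc*+"
#   infix_2_prefix("a+b*c")  -> "+a*bc"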
if __name__ == "__main__":
    Infix = input('''\nEnter an Infix Equation = ''') # Input an Infix equation
    Infix = ''''''.join(Infix.split()) # Remove spaces from the input
    print('''\n\t''', Infix, '''(Infix) -> ''', infix_2_prefix(Infix), '''(Prefix)''')
| 24 | 0 |
'''simple docstring'''
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files' , [
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
] , )
def UpperCAmelCase_ ( A , A ):
'''simple docstring'''
_a : List[Any] = tmp_path_factory.mktemp('dset_infos_dir' )
if "full:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('---\ndataset_info:\n dataset_size: 42\n---' )
if "empty:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f:
f.write('{"default": {"dataset_size": 42}}' )
_a : Optional[int] = DatasetInfosDict.from_directory(_lowerCamelCase )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 4_2
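# Context: the YAML block between `---` markers in README.md is the dataset card
# front matter that superseded dataset_infos.json, which is why the test above
# exercises both sources.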
@pytest.mark.parametrize(
'dataset_info' , [
DatasetInfo(),
DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=4_2 , ),
] , )
def UpperCAmelCase_ ( A , A ):
'''simple docstring'''
_a : Dict = str(_lowerCamelCase )
dataset_info.write_to_directory(_lowerCamelCase )
_a : Dict = DatasetInfo.from_directory(_lowerCamelCase )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(_lowerCamelCase , 'dataset_info.json' ) )
def UpperCAmelCase_ ( ):
'''simple docstring'''
_a : int = DatasetInfo(
description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train', 'num_examples': 4_2}] , download_checksums={} , download_size=1_3_3_7 , post_processing_size=4_4_2 , dataset_size=1_2_3_4 , size_in_bytes=1_3_3_7 + 4_4_2 + 1_2_3_4 , )
_a : Tuple = dataset_info._to_yaml_dict()
assert sorted(_lowerCamelCase ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
_a : Union[str, Any] = yaml.safe_dump(_lowerCamelCase )
_a : Optional[int] = yaml.safe_load(_lowerCamelCase )
assert dataset_info_yaml_dict == reloaded
def UpperCAmelCase_ ( ):
'''simple docstring'''
_a : str = DatasetInfo()
_a : int = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'dataset_infos_dict' , [
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=4_2 , )
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=4_2 ),
'v2': DatasetInfo(dataset_size=1_3_3_7 ),
} ),
] , )
def UpperCAmelCase_ ( A , A ):
'''simple docstring'''
_a : int = str(_lowerCamelCase )
dataset_infos_dict.write_to_directory(_lowerCamelCase )
_a : Union[str, Any] = DatasetInfosDict.from_directory(_lowerCamelCase )
    # the config_name key of the dataset_infos_dict takes precedence over the DatasetInfo attribute
for config_name, dataset_info in dataset_infos_dict.items():
_a : int = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
_a : Dict = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(_lowerCamelCase , 'README.md' ) )
| 120 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase_ : int = logging.get_logger(__name__)
UpperCAmelCase_ : Dict = {
'''microsoft/swin-tiny-patch4-window7-224''': (
'''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase):
__lowercase : List[Any] = '''swin'''
__lowercase : str = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , __SCREAMING_SNAKE_CASE=224 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=96 , __SCREAMING_SNAKE_CASE=[2, 2, 6, 2] , __SCREAMING_SNAKE_CASE=[3, 6, 12, 24] , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=4.0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1E-5 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ) -> int:
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE )
__snake_case = image_size
__snake_case = patch_size
__snake_case = num_channels
__snake_case = embed_dim
__snake_case = depths
__snake_case = len(__SCREAMING_SNAKE_CASE )
__snake_case = num_heads
__snake_case = window_size
__snake_case = mlp_ratio
__snake_case = qkv_bias
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = drop_path_rate
__snake_case = hidden_act
__snake_case = use_absolute_embeddings
__snake_case = layer_norm_eps
__snake_case = initializer_range
__snake_case = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__snake_case = int(embed_dim * 2 ** (len(__SCREAMING_SNAKE_CASE ) - 1) )
__snake_case = ['''stem'''] + [F'''stage{idx}''' for idx in range(1 , len(__SCREAMING_SNAKE_CASE ) + 1 )]
__snake_case , __snake_case = get_aligned_output_features_output_indices(
out_features=__SCREAMING_SNAKE_CASE , out_indices=__SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : Optional[int] = version.parse('''1.11''')
@property
def lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowerCAmelCase ( self ) -> float:
'''simple docstring'''
return 1E-4
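# Usage sketch for the two classes above (hypothetical, assuming the un-mangled
# upstream names SwinConfig / SwinOnnxConfig):
#   config = SwinConfig()
#   onnx_config = SwinOnnxConfig(config)
#   onnx_config.inputs               # OrderedDict with "pixel_values" dynamic axes
#   onnx_config.atol_for_validation  # 1e-4, the tolerance returned above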
| 24 | 0 |
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def UpperCAmelCase ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase )-> Optional[Any]:
'''simple docstring'''
if isinstance(_lowerCamelCase ,torch.Tensor ):
return image
elif isinstance(_lowerCamelCase ,PIL.Image.Image ):
SCREAMING_SNAKE_CASE_ = [image]
if isinstance(image[0] ,PIL.Image.Image ):
SCREAMING_SNAKE_CASE_ = [np.array(i.resize((w, h) ,resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
SCREAMING_SNAKE_CASE_ = np.concatenate(_lowerCamelCase ,axis=0 )
SCREAMING_SNAKE_CASE_ = np.array(_lowerCamelCase ).astype(np.floataa ) / 2_5_5.0
SCREAMING_SNAKE_CASE_ = image.transpose(0 ,3 ,1 ,2 )
SCREAMING_SNAKE_CASE_ = 2.0 * image - 1.0
SCREAMING_SNAKE_CASE_ = torch.from_numpy(_lowerCamelCase )
elif isinstance(image[0] ,torch.Tensor ):
SCREAMING_SNAKE_CASE_ = torch.cat(_lowerCamelCase ,dim=0 )
return image
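# The next helper is spherical linear interpolation (slerp): it interpolates
# between two vectors along a great circle, falling back to plain linear
# interpolation when they are nearly parallel (normalized dot product above
# DOT_THRESHOLD, the 0.9995 default in the signature).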
def UpperCAmelCase ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase=0.9_9_9_5 )-> Union[str, Any]:
'''simple docstring'''
if not isinstance(_lowerCamelCase ,np.ndarray ):
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = va.device
SCREAMING_SNAKE_CASE_ = va.cpu().numpy()
SCREAMING_SNAKE_CASE_ = va.cpu().numpy()
SCREAMING_SNAKE_CASE_ = np.sum(va * va / (np.linalg.norm(_lowerCamelCase ) * np.linalg.norm(_lowerCamelCase )) )
if np.abs(_lowerCamelCase ) > DOT_THRESHOLD:
SCREAMING_SNAKE_CASE_ = (1 - t) * va + t * va
else:
SCREAMING_SNAKE_CASE_ = np.arccos(_lowerCamelCase )
SCREAMING_SNAKE_CASE_ = np.sin(_lowerCamelCase )
SCREAMING_SNAKE_CASE_ = theta_a * t
SCREAMING_SNAKE_CASE_ = np.sin(_lowerCamelCase )
SCREAMING_SNAKE_CASE_ = np.sin(theta_a - theta_t ) / sin_theta_a
SCREAMING_SNAKE_CASE_ = sin_theta_t / sin_theta_a
SCREAMING_SNAKE_CASE_ = sa * va + sa * va
if inputs_are_torch:
SCREAMING_SNAKE_CASE_ = torch.from_numpy(_lowerCamelCase ).to(_lowerCamelCase )
return va
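# The next helper is a spherical distance loss between embeddings: it normalizes
# both inputs, maps half the chord length back to an angle via arcsin, and
# squares it; this is the objective used for CLIP guidance further below.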
def UpperCAmelCase ( UpperCAmelCase ,UpperCAmelCase )-> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = F.normalize(_lowerCamelCase ,dim=-1 )
SCREAMING_SNAKE_CASE_ = F.normalize(_lowerCamelCase ,dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def UpperCAmelCase ( UpperCAmelCase ,UpperCAmelCase )-> int:
'''simple docstring'''
for param in model.parameters():
SCREAMING_SNAKE_CASE_ = value
class snake_case ( __lowerCAmelCase ):
'''simple docstring'''
def __init__( self : Optional[int] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : str=None , ) -> str:
"""simple docstring"""
super().__init__()
self.register_modules(
vae=__SCREAMING_SNAKE_CASE , text_encoder=__SCREAMING_SNAKE_CASE , clip_model=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE , unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , coca_model=__SCREAMING_SNAKE_CASE , coca_tokenizer=__SCREAMING_SNAKE_CASE , coca_transform=__SCREAMING_SNAKE_CASE , )
SCREAMING_SNAKE_CASE_ = (
feature_extractor.size
if isinstance(feature_extractor.size , __SCREAMING_SNAKE_CASE )
else feature_extractor.size['''shortest_edge''']
)
SCREAMING_SNAKE_CASE_ = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , __SCREAMING_SNAKE_CASE )
set_requires_grad(self.clip_model , __SCREAMING_SNAKE_CASE )
def _lowercase ( self : List[str] , lowerCAmelCase_ : int = "auto" ) -> List[Any]:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
SCREAMING_SNAKE_CASE_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__SCREAMING_SNAKE_CASE )
def _lowercase ( self : Optional[Any] ) -> str:
"""simple docstring"""
self.enable_attention_slicing(__SCREAMING_SNAKE_CASE )
def _lowercase ( self : Dict ) -> Tuple:
"""simple docstring"""
set_requires_grad(self.vae , __SCREAMING_SNAKE_CASE )
def _lowercase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
set_requires_grad(self.vae , __SCREAMING_SNAKE_CASE )
def _lowercase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
set_requires_grad(self.unet , __SCREAMING_SNAKE_CASE )
def _lowercase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
set_requires_grad(self.unet , __SCREAMING_SNAKE_CASE )
def _lowercase ( self : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = min(int(num_inference_steps * strength ) , __SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = max(num_inference_steps - init_timestep , 0 )
SCREAMING_SNAKE_CASE_ = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def _lowercase ( self : List[str] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple=None ) -> str:
"""simple docstring"""
if not isinstance(__SCREAMING_SNAKE_CASE , torch.Tensor ):
raise ValueError(F'''`image` has to be of type `torch.Tensor` but is {type(__SCREAMING_SNAKE_CASE )}''' )
SCREAMING_SNAKE_CASE_ = image.to(device=__SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE )
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE_ = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(__SCREAMING_SNAKE_CASE )
]
SCREAMING_SNAKE_CASE_ = torch.cat(__SCREAMING_SNAKE_CASE , dim=0 )
else:
SCREAMING_SNAKE_CASE_ = self.vae.encode(__SCREAMING_SNAKE_CASE ).latent_dist.sample(__SCREAMING_SNAKE_CASE )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
SCREAMING_SNAKE_CASE_ = 0.18_215 * init_latents
SCREAMING_SNAKE_CASE_ = init_latents.repeat_interleave(__SCREAMING_SNAKE_CASE , dim=0 )
SCREAMING_SNAKE_CASE_ = randn_tensor(init_latents.shape , generator=__SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE )
# get latents
SCREAMING_SNAKE_CASE_ = self.scheduler.add_noise(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = init_latents
return latents
def _lowercase ( self : List[str] , lowerCAmelCase_ : Optional[int] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.coca_transform(__SCREAMING_SNAKE_CASE ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
SCREAMING_SNAKE_CASE_ = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
SCREAMING_SNAKE_CASE_ = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('''<end_of_text>''' )[0].replace('''<start_of_text>''' , '''''' ).rstrip(''' .,''' )
def _lowercase ( self : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Dict ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.feature_extractor.preprocess(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = torch.from_numpy(clip_image_input['''pixel_values'''][0] ).unsqueeze(0 ).to(self.device ).half()
SCREAMING_SNAKE_CASE_ = self.clip_model.get_image_features(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = image_embeddings_clip.repeat_interleave(__SCREAMING_SNAKE_CASE , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def _lowercase ( self : List[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : int , lowerCAmelCase_ : Union[str, Any] , ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = latents.detach().requires_grad_()
SCREAMING_SNAKE_CASE_ = self.scheduler.scale_model_input(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# predict the noise residual
SCREAMING_SNAKE_CASE_ = self.unet(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
SCREAMING_SNAKE_CASE_ = self.scheduler.alphas_cumprod[timestep]
SCREAMING_SNAKE_CASE_ = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
SCREAMING_SNAKE_CASE_ = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
SCREAMING_SNAKE_CASE_ = torch.sqrt(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , __SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE_ = self.scheduler.sigmas[index]
SCREAMING_SNAKE_CASE_ = latents - sigma * noise_pred
else:
raise ValueError(F'''scheduler type {type(self.scheduler )} not supported''' )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
SCREAMING_SNAKE_CASE_ = 1 / 0.18_215 * sample
SCREAMING_SNAKE_CASE_ = self.vae.decode(__SCREAMING_SNAKE_CASE ).sample
SCREAMING_SNAKE_CASE_ = (image / 2 + 0.5).clamp(0 , 1 )
SCREAMING_SNAKE_CASE_ = transforms.Resize(self.feature_extractor_size )(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = self.normalize(__SCREAMING_SNAKE_CASE ).to(latents.dtype )
SCREAMING_SNAKE_CASE_ = self.clip_model.get_image_features(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = spherical_dist_loss(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).mean() * clip_guidance_scale
SCREAMING_SNAKE_CASE_ = -torch.autograd.grad(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )[0]
if isinstance(self.scheduler , __SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE_ = latents.detach() + grads * (sigma**2)
SCREAMING_SNAKE_CASE_ = noise_pred_original
else:
SCREAMING_SNAKE_CASE_ = noise_pred_original - torch.sqrt(__SCREAMING_SNAKE_CASE ) * grads
return noise_pred, latents
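    # cond_fn above implements CLIP guidance: it decodes the predicted x_0 back to
    # an image, scores it against the target CLIP embedding with the spherical
    # distance loss, and adjusts the noise prediction with the resulting gradient.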
@torch.no_grad()
def __call__( self : str , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] = None , lowerCAmelCase_ : Union[str, Any] = None , lowerCAmelCase_ : Union[str, Any] = 512 , lowerCAmelCase_ : Tuple = 512 , lowerCAmelCase_ : List[Any] = 0.6 , lowerCAmelCase_ : List[Any] = 50 , lowerCAmelCase_ : str = 7.5 , lowerCAmelCase_ : Dict = 1 , lowerCAmelCase_ : str = 0.0 , lowerCAmelCase_ : Dict = 100 , lowerCAmelCase_ : Dict = None , lowerCAmelCase_ : List[str] = "pil" , lowerCAmelCase_ : Tuple = True , lowerCAmelCase_ : List[Any] = 0.8 , lowerCAmelCase_ : Tuple = 0.1 , lowerCAmelCase_ : Optional[int] = 0.1 , ) -> Any:
"""simple docstring"""
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and len(__SCREAMING_SNAKE_CASE ) != batch_size:
raise ValueError(F'''You have passed {batch_size} batch_size, but only {len(__SCREAMING_SNAKE_CASE )} generators.''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if isinstance(__SCREAMING_SNAKE_CASE , torch.Generator ) and batch_size > 1:
SCREAMING_SNAKE_CASE_ = [generator] + [None] * (batch_size - 1)
SCREAMING_SNAKE_CASE_ = [
('''model''', self.coca_model is None),
('''tokenizer''', self.coca_tokenizer is None),
('''transform''', self.coca_transform is None),
]
SCREAMING_SNAKE_CASE_ = [x[0] for x in coca_is_none if x[1]]
SCREAMING_SNAKE_CASE_ = ''', '''.join(__SCREAMING_SNAKE_CASE )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(__SCREAMING_SNAKE_CASE ):
raise ValueError(
F'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
F'''Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
SCREAMING_SNAKE_CASE_ = self.get_image_description(__SCREAMING_SNAKE_CASE )
if style_prompt is None:
if len(__SCREAMING_SNAKE_CASE ):
raise ValueError(
F'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
F''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
SCREAMING_SNAKE_CASE_ = self.get_image_description(__SCREAMING_SNAKE_CASE )
# get prompt text embeddings for content and style
SCREAMING_SNAKE_CASE_ = self.tokenizer(
__SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE_ = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
SCREAMING_SNAKE_CASE_ = self.tokenizer(
__SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE_ = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
SCREAMING_SNAKE_CASE_ = slerp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# duplicate text embeddings for each generation per prompt
SCREAMING_SNAKE_CASE_ = text_embeddings.repeat_interleave(__SCREAMING_SNAKE_CASE , dim=0 )
# set timesteps
SCREAMING_SNAKE_CASE_ = '''offset''' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
SCREAMING_SNAKE_CASE_ = {}
if accepts_offset:
SCREAMING_SNAKE_CASE_ = 1
self.scheduler.set_timesteps(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = self.get_timesteps(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.device )
SCREAMING_SNAKE_CASE_ = timesteps[:1].repeat(__SCREAMING_SNAKE_CASE )
# Preprocess image
SCREAMING_SNAKE_CASE_ = preprocess(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = self.prepare_latents(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , text_embeddings.dtype , self.device , __SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = preprocess(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = self.prepare_latents(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , text_embeddings.dtype , self.device , __SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = slerp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if clip_guidance_scale > 0:
SCREAMING_SNAKE_CASE_ = self.get_clip_image_embeddings(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = self.get_clip_image_embeddings(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = slerp(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
SCREAMING_SNAKE_CASE_ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE_ = content_text_input.input_ids.shape[-1]
SCREAMING_SNAKE_CASE_ = self.tokenizer([''''''] , padding='''max_length''' , max_length=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE_ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
SCREAMING_SNAKE_CASE_ = uncond_embeddings.repeat_interleave(__SCREAMING_SNAKE_CASE , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
SCREAMING_SNAKE_CASE_ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
SCREAMING_SNAKE_CASE_ = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
SCREAMING_SNAKE_CASE_ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
SCREAMING_SNAKE_CASE_ = torch.randn(__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , device='''cpu''' , dtype=__SCREAMING_SNAKE_CASE ).to(
self.device )
else:
SCREAMING_SNAKE_CASE_ = torch.randn(__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , device=self.device , dtype=__SCREAMING_SNAKE_CASE )
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
SCREAMING_SNAKE_CASE_ = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
SCREAMING_SNAKE_CASE_ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
SCREAMING_SNAKE_CASE_ = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
SCREAMING_SNAKE_CASE_ = {}
if accepts_eta:
SCREAMING_SNAKE_CASE_ = eta
# check if the scheduler accepts generator
SCREAMING_SNAKE_CASE_ = '''generator''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
SCREAMING_SNAKE_CASE_ = generator
with self.progress_bar(total=__SCREAMING_SNAKE_CASE ):
for i, t in enumerate(__SCREAMING_SNAKE_CASE ):
# expand the latents if we are doing classifier free guidance
SCREAMING_SNAKE_CASE_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
SCREAMING_SNAKE_CASE_ = self.scheduler.scale_model_input(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# predict the noise residual
SCREAMING_SNAKE_CASE_ = self.unet(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = noise_pred.chunk(2 )
SCREAMING_SNAKE_CASE_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
SCREAMING_SNAKE_CASE_ = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = self.cond_fn(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , )
# compute the previous noisy sample x_t -> x_t-1
SCREAMING_SNAKE_CASE_ = self.scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
SCREAMING_SNAKE_CASE_ = 1 / 0.18_215 * latents
SCREAMING_SNAKE_CASE_ = self.vae.decode(__SCREAMING_SNAKE_CASE ).sample
SCREAMING_SNAKE_CASE_ = (image / 2 + 0.5).clamp(0 , 1 )
SCREAMING_SNAKE_CASE_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE_ = self.numpy_to_pil(__SCREAMING_SNAKE_CASE )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=__SCREAMING_SNAKE_CASE , nsfw_content_detected=__SCREAMING_SNAKE_CASE )
| 393 |
'''simple docstring'''
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t) -> str:
    '''simple docstring'''
    t = int(t )
    h, m, s = t // 36_00, (t // 60) % 60, t % 60
return f'''{h}:{m:02d}:{s:02d}''' if h != 0 else f'''{m:02d}:{s:02d}'''
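# Example: format_time(3661) -> '1:01:01', format_time(61) -> '01:01'.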
def html_progress_bar(value, total, prefix, label, width=3_00) -> str:
'''simple docstring'''
return f'''
<div>
{prefix}
<progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>
{label}
</div>
'''
def text_to_html_table(items) -> str:
'''simple docstring'''
    html_code = '''<table border="1" class="dataframe">\n'''
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += f''' <th>{i}</th>\n'''
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
            elt = f'''{elt:.6f}''' if isinstance(elt , float ) else str(elt )
html_code += f''' <td>{elt}</td>\n'''
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
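# Example: text_to_html_table([["Step", "Loss"], [1, 0.5]]) renders a two-column
# HTML table; floats are formatted to six decimals by the isinstance check above.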
class lowerCAmelCase :
__lowercase : str = 5
__lowercase : Optional[Any] = 0.2
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 300 , ) -> List[Any]:
'''simple docstring'''
__snake_case = total
__snake_case = '''''' if prefix is None else prefix
__snake_case = leave
__snake_case = parent
__snake_case = width
__snake_case = None
__snake_case = None
__snake_case = None
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None ) -> Any:
'''simple docstring'''
__snake_case = value
if comment is not None:
__snake_case = comment
if self.last_value is None:
__snake_case = __snake_case = time.time()
__snake_case = __snake_case = value
__snake_case = __snake_case = None
__snake_case = self.warmup
__snake_case = 1
self.update_bar(__SCREAMING_SNAKE_CASE )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
if self.first_calls > 0:
self.first_calls -= 1
__snake_case = time.time()
__snake_case = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
__snake_case = self.elapsed_time / (value - self.start_value)
else:
__snake_case = None
if value >= self.total:
__snake_case = self.total
__snake_case = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
__snake_case = self.average_time_per_item * (self.total - value)
self.update_bar(__SCREAMING_SNAKE_CASE )
__snake_case = value
__snake_case = current_time
if self.average_time_per_item is None:
__snake_case = 1
else:
__snake_case = max(int(self.update_every / self.average_time_per_item ) , 1 )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) -> List[str]:
'''simple docstring'''
__snake_case = ''' ''' * (len(str(self.total ) ) - len(str(__SCREAMING_SNAKE_CASE ) )) + str(__SCREAMING_SNAKE_CASE )
if self.elapsed_time is None:
__snake_case = F'''[{spaced_value}/{self.total} : < :'''
elif self.predicted_remaining is None:
__snake_case = F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )}'''
else:
__snake_case = (
F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <'''
F''' {format_time(self.predicted_remaining )}'''
)
self.label += F''', {1/self.average_time_per_item:.2f} it/s'''
self.label += "]" if self.comment is None or len(self.comment ) == 0 else F''', {self.comment}]'''
self.display()
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
__snake_case = disp.display(disp.HTML(self.html_code ) , display_id=__SCREAMING_SNAKE_CASE )
else:
self.output.update(disp.HTML(self.html_code ) )
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
if self.parent is None and self.output is not None:
self.output.update(disp.HTML('''''' ) )
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) -> Any:
'''simple docstring'''
super().__init__(__SCREAMING_SNAKE_CASE )
__snake_case = None if column_names is None else [column_names]
__snake_case = None
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
__snake_case = disp.display(disp.HTML(self.html_code ) , display_id=__SCREAMING_SNAKE_CASE )
else:
self.output.update(disp.HTML(self.html_code ) )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
if self.inner_table is None:
__snake_case = [list(values.keys() ), list(values.values() )]
else:
__snake_case = self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(__SCREAMING_SNAKE_CASE )
__snake_case = columns
self.inner_table.append([values[c] for c in columns] )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=300 ) -> List[str]:
'''simple docstring'''
__snake_case = NotebookProgressBar(__SCREAMING_SNAKE_CASE , prefix=__SCREAMING_SNAKE_CASE , parent=self , width=__SCREAMING_SNAKE_CASE )
return self.child_bar
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = None
self.display()
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self ) -> str:
'''simple docstring'''
__snake_case = None
__snake_case = None
__snake_case = False
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
__snake_case = '''Epoch''' if args.evaluation_strategy == IntervalStrategy.EPOCH else '''Step'''
__snake_case = 0
__snake_case = 0
__snake_case = [self.first_column] + ['''Training Loss''']
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append('''Validation Loss''' )
__snake_case = NotebookTrainingTracker(state.max_steps , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
__snake_case = int(state.epoch ) if int(state.epoch ) == state.epoch else F'''{state.epoch:.2f}'''
self.training_tracker.update(
state.global_step + 1 , comment=F'''Epoch {epoch}/{state.num_train_epochs}''' , force_update=self._force_next_update , )
__snake_case = False
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
if not has_length(__SCREAMING_SNAKE_CASE ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
__snake_case = self.training_tracker.add_child(len(__SCREAMING_SNAKE_CASE ) )
else:
__snake_case = NotebookProgressBar(len(__SCREAMING_SNAKE_CASE ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
if self.prediction_bar is not None:
self.prediction_bar.close()
__snake_case = None
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
__snake_case = {'''Training Loss''': logs['''loss''']}
            # First column is necessarily Step since we're not in epoch eval strategy
__snake_case = state.global_step
self.training_tracker.write_line(__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
if self.training_tracker is not None:
__snake_case = {'''Training Loss''': '''No log''', '''Validation Loss''': '''No log'''}
for log in reversed(state.log_history ):
if "loss" in log:
__snake_case = log['''loss''']
break
if self.first_column == "Epoch":
__snake_case = int(state.epoch )
else:
__snake_case = state.global_step
__snake_case = '''eval'''
for k in metrics:
if k.endswith('''_loss''' ):
__snake_case = re.sub(r'''\_loss$''' , '''''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop('''total_flos''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop('''epoch''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop(F'''{metric_key_prefix}_runtime''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop(F'''{metric_key_prefix}_samples_per_second''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop(F'''{metric_key_prefix}_steps_per_second''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop(F'''{metric_key_prefix}_jit_compilation_time''' , __SCREAMING_SNAKE_CASE )
for k, v in metrics.items():
if k == F'''{metric_key_prefix}_loss''':
__snake_case = v
else:
__snake_case = k.split('''_''' )
__snake_case = ''' '''.join([part.capitalize() for part in splits[1:]] )
__snake_case = v
self.training_tracker.write_line(__SCREAMING_SNAKE_CASE )
self.training_tracker.remove_child()
__snake_case = None
# Evaluation takes a long time so we should force the next update.
__snake_case = True
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
self.training_tracker.update(
state.global_step , comment=F'''Epoch {int(state.epoch )}/{state.num_train_epochs}''' , force_update=__SCREAMING_SNAKE_CASE )
__snake_case = None
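# Note: upstream, this callback (NotebookProgressCallback) is registered
# automatically by the Trainer when running inside a Jupyter notebook, replacing
# the default tqdm-based progress callback.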
| 24 | 0 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class __a :
def __init__( self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=7 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=99 , UpperCAmelCase=32 , UpperCAmelCase=2 , UpperCAmelCase=4 , UpperCAmelCase=37 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=512 , UpperCAmelCase=16 , UpperCAmelCase=2 , UpperCAmelCase=0.0_2 , UpperCAmelCase=3 , UpperCAmelCase=4 , UpperCAmelCase=None , ):
'''simple docstring'''
lowerCAmelCase_ = parent
lowerCAmelCase_ = 13
lowerCAmelCase_ = 7
lowerCAmelCase_ = True
lowerCAmelCase_ = True
lowerCAmelCase_ = True
lowerCAmelCase_ = True
lowerCAmelCase_ = 99
lowerCAmelCase_ = 384
lowerCAmelCase_ = 2
lowerCAmelCase_ = 4
lowerCAmelCase_ = 37
lowerCAmelCase_ = '''gelu'''
lowerCAmelCase_ = 0.1
lowerCAmelCase_ = 0.1
lowerCAmelCase_ = 512
lowerCAmelCase_ = 16
lowerCAmelCase_ = 2
lowerCAmelCase_ = 0.0_2
lowerCAmelCase_ = 3
lowerCAmelCase_ = 4
lowerCAmelCase_ = 128
lowerCAmelCase_ = 2
lowerCAmelCase_ = 9
lowerCAmelCase_ = 1
lowerCAmelCase_ = None
def lowerCamelCase_ ( self ):
'''simple docstring'''
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase_ = None
if self.use_input_mask:
lowerCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase_ = None
if self.use_token_type_ids:
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase_ = None
lowerCAmelCase_ = None
lowerCAmelCase_ = None
if self.use_labels:
lowerCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase_ = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase_ = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__SCREAMING_SNAKE_CASE , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase_ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase_ = TFConvBertModel(config=__SCREAMING_SNAKE_CASE )
lowerCAmelCase_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowerCAmelCase_ = [input_ids, input_mask]
lowerCAmelCase_ = model(__SCREAMING_SNAKE_CASE )
lowerCAmelCase_ = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase_ = TFConvBertForMaskedLM(config=__SCREAMING_SNAKE_CASE )
lowerCAmelCase_ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowerCAmelCase_ = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase_ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase_ = self.num_labels
lowerCAmelCase_ = TFConvBertForSequenceClassification(config=__SCREAMING_SNAKE_CASE )
lowerCAmelCase_ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowerCAmelCase_ = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase_ = self.num_choices
lowerCAmelCase_ = TFConvBertForMultipleChoice(config=__SCREAMING_SNAKE_CASE )
lowerCAmelCase_ = tf.tile(tf.expand_dims(__SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase_ = tf.tile(tf.expand_dims(__SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase_ = tf.tile(tf.expand_dims(__SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase_ = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
lowerCAmelCase_ = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase_ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase_ = self.num_labels
lowerCAmelCase_ = TFConvBertForTokenClassification(config=__SCREAMING_SNAKE_CASE )
lowerCAmelCase_ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowerCAmelCase_ = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase_ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase_ = TFConvBertForQuestionAnswering(config=__SCREAMING_SNAKE_CASE )
lowerCAmelCase_ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowerCAmelCase_ = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase_ ( self ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
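# The tester above builds tiny random configs and inputs so that each head
# (masked LM, sequence/token classification, multiple choice, QA) can be
# smoke-tested quickly; the test case below wires it into the shared suite.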
@require_tf
class __a ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
lowerCamelCase : str =(
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCamelCase : Optional[int] =(
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCamelCase : Optional[Any] =False
lowerCamelCase : List[str] =False
lowerCamelCase : Union[str, Any] =False
def lowerCamelCase_ ( self ):
'''simple docstring'''
lowerCAmelCase_ = TFConvBertModelTester(self )
lowerCAmelCase_ = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37 )
def lowerCamelCase_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self ):
'''simple docstring'''
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( self ):
'''simple docstring'''
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( self ):
'''simple docstring'''
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( self ):
'''simple docstring'''
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( self ):
'''simple docstring'''
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( self ):
'''simple docstring'''
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__SCREAMING_SNAKE_CASE )
@slow
def lowerCamelCase_ ( self ):
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ = True
lowerCAmelCase_ = True
if hasattr(__SCREAMING_SNAKE_CASE , '''use_cache''' ):
lowerCAmelCase_ = True
lowerCAmelCase_ = getattr(self.model_tester , '''encoder_seq_length''' , self.model_tester.seq_length )
lowerCAmelCase_ = getattr(self.model_tester , '''key_length''' , __SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
lowerCAmelCase_ = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowerCAmelCase_ = model_class(__SCREAMING_SNAKE_CASE )
lowerCAmelCase_ = len(model(__SCREAMING_SNAKE_CASE ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__SCREAMING_SNAKE_CASE , saved_model=__SCREAMING_SNAKE_CASE )
lowerCAmelCase_ = os.path.join(__SCREAMING_SNAKE_CASE , '''saved_model''' , '''1''' )
lowerCAmelCase_ = tf.keras.models.load_model(__SCREAMING_SNAKE_CASE )
lowerCAmelCase_ = model(__SCREAMING_SNAKE_CASE )
if self.is_encoder_decoder:
lowerCAmelCase_ = outputs['''encoder_hidden_states''']
lowerCAmelCase_ = outputs['''encoder_attentions''']
else:
lowerCAmelCase_ = outputs['''hidden_states''']
lowerCAmelCase_ = outputs['''attentions''']
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
lowerCAmelCase_ = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
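    # Note the `num_attention_heads / 2` in the expected attention shapes:
    # ConvBERT's mixed attention hands half of the heads to span-based dynamic
    # convolution (with the default head_ratio of 2), so only half remain as
    # standard self-attention heads.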
@slow
def lowerCamelCase_ ( self ):
'''simple docstring'''
lowerCAmelCase_ = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''' )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( self ):
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ = True
lowerCAmelCase_ = getattr(self.model_tester , '''decoder_seq_length''' , self.model_tester.seq_length )
lowerCAmelCase_ = getattr(self.model_tester , '''encoder_seq_length''' , self.model_tester.seq_length )
lowerCAmelCase_ = getattr(self.model_tester , '''key_length''' , __SCREAMING_SNAKE_CASE )
lowerCAmelCase_ = getattr(self.model_tester , '''key_length''' , __SCREAMING_SNAKE_CASE )
def check_decoder_attentions_output(UpperCAmelCase ):
lowerCAmelCase_ = len(__SCREAMING_SNAKE_CASE )
self.assertEqual(out_len % 2 , 0 )
lowerCAmelCase_ = outputs.decoder_attentions
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(UpperCAmelCase ):
lowerCAmelCase_ = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
lowerCAmelCase_ = True
lowerCAmelCase_ = False
lowerCAmelCase_ = model_class(__SCREAMING_SNAKE_CASE )
lowerCAmelCase_ = model(self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
lowerCAmelCase_ = len(__SCREAMING_SNAKE_CASE )
self.assertEqual(config.output_hidden_states , __SCREAMING_SNAKE_CASE )
check_encoder_attentions_output(__SCREAMING_SNAKE_CASE )
if self.is_encoder_decoder:
lowerCAmelCase_ = model_class(__SCREAMING_SNAKE_CASE )
lowerCAmelCase_ = model(self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
self.assertEqual(config.output_hidden_states , __SCREAMING_SNAKE_CASE )
check_decoder_attentions_output(__SCREAMING_SNAKE_CASE )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
lowerCAmelCase_ = True
lowerCAmelCase_ = model_class(__SCREAMING_SNAKE_CASE )
lowerCAmelCase_ = model(self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
self.assertEqual(config.output_hidden_states , __SCREAMING_SNAKE_CASE )
check_encoder_attentions_output(__SCREAMING_SNAKE_CASE )
# Check attention is always last and order is fine
lowerCAmelCase_ = True
lowerCAmelCase_ = True
lowerCAmelCase_ = model_class(__SCREAMING_SNAKE_CASE )
lowerCAmelCase_ = model(self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__SCREAMING_SNAKE_CASE ) )
self.assertEqual(model.config.output_hidden_states , __SCREAMING_SNAKE_CASE )
check_encoder_attentions_output(__SCREAMING_SNAKE_CASE )
@require_tf
class __a ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self ):
'''simple docstring'''
lowerCAmelCase_ = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''' )
lowerCAmelCase_ = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCAmelCase_ = model(__SCREAMING_SNAKE_CASE )[0]
lowerCAmelCase_ = [1, 6, 768]
self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE )
        lowerCAmelCase_ = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 )
 | 552 |
'''simple docstring'''
def least_divisible_repunit(divisor: int) -> int:
    '''simple docstring'''
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index
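# Worked example: least_divisible_repunit(7) == 6, because the first repunit
# divisible by 7 is R(6) = 111111 = 7 * 15873.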
def solution(limit: int = 1_00_00_00) -> int:
    '''simple docstring'''
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor ) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(F"""{solution() = }""")
| 24 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __lowercase ( __lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : List[Any] = KandinskyImgaImgPipeline
UpperCamelCase : int = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''']
UpperCamelCase : List[Any] = [
'''prompt''',
'''negative_prompt''',
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
]
UpperCamelCase : Optional[int] = [
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''negative_prompt''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
UpperCamelCase : List[Any] = False
@property
def __A ( self ) -> Optional[int]:
'''simple docstring'''
return 32
@property
def __A ( self ) -> str:
'''simple docstring'''
return 32
@property
def __A ( self ) -> Any:
'''simple docstring'''
return self.time_input_dim
@property
def __A ( self ) -> List[Any]:
'''simple docstring'''
return self.time_input_dim * 4
@property
def __A ( self ) -> List[Any]:
'''simple docstring'''
return 1_00
@property
def __A ( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def __A ( self ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
lowerCamelCase = MultilingualCLIP(__SCREAMING_SNAKE_CASE )
lowerCamelCase = text_encoder.eval()
return text_encoder
@property
def __A ( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase = {
"""in_channels""": 4,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
lowerCamelCase = UNetaDConditionModel(**__SCREAMING_SNAKE_CASE )
return model
@property
def __A ( self ) -> Dict:
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __A ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase = VQModel(**self.dummy_movq_kwargs )
return model
def __A ( self ) -> str:
'''simple docstring'''
lowerCamelCase = self.dummy_text_encoder
lowerCamelCase = self.dummy_tokenizer
lowerCamelCase = self.dummy_unet
lowerCamelCase = self.dummy_movq
lowerCamelCase = {
"""num_train_timesteps""": 10_00,
"""beta_schedule""": """linear""",
"""beta_start""": 0.00085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
lowerCamelCase = DDIMScheduler(**__SCREAMING_SNAKE_CASE )
lowerCamelCase = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def __A ( self , A , A=0 ) -> List[str]:
'''simple docstring'''
lowerCamelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
lowerCamelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__SCREAMING_SNAKE_CASE )
# create init_image
lowerCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCamelCase = Image.fromarray(np.uinta(__SCREAMING_SNAKE_CASE ) ).convert("""RGB""" ).resize((2_56, 2_56) )
if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
lowerCamelCase = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
lowerCamelCase = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
lowerCamelCase = {
"""prompt""": """horse""",
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
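        # Note: with strength=0.2 the img2img pipeline starts from a lightly noised
        # init_image and runs only roughly the final 20% of the 10-step denoising schedule.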
return inputs
def __A ( self ) -> int:
'''simple docstring'''
lowerCamelCase = """cpu"""
lowerCamelCase = self.get_dummy_components()
lowerCamelCase = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
lowerCamelCase = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
lowerCamelCase = pipe(**self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) )
lowerCamelCase = output.images
lowerCamelCase = pipe(
**self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) , return_dict=__SCREAMING_SNAKE_CASE , )[0]
lowerCamelCase = image[0, -3:, -3:, -1]
lowerCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase = np.array(
[0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def __A ( self ) -> List[str]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ) -> Any:
'''simple docstring'''
lowerCamelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_img2img_frog.npy""" )
lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
lowerCamelCase = """A red cartoon frog, 4k"""
lowerCamelCase = KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(__SCREAMING_SNAKE_CASE )
lowerCamelCase = KandinskyImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1""" , torch_dtype=torch.floataa )
lowerCamelCase = pipeline.to(__SCREAMING_SNAKE_CASE )
pipeline.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
lowerCamelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowerCamelCase , lowerCamelCase = pipe_prior(
__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
lowerCamelCase = pipeline(
__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , image_embeds=__SCREAMING_SNAKE_CASE , negative_image_embeds=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type="""np""" , )
lowerCamelCase = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
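        # Two-stage flow: the prior maps the text prompt to CLIP image embeddings,
        # which the img2img decoder pipeline then conditions on alongside the init image.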
| 457 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] , _lowerCamelCase : List[Any] )-> Optional[Any]:
'''simple docstring'''
__snake_case = []
for part_id in partition_order:
__snake_case = df.where(f'''SPARK_PARTITION_ID() = {part_id}''' ).collect()
for row_idx, row in enumerate(_lowerCamelCase ):
expected_row_ids_and_row_dicts.append((f'''{part_id}_{row_idx}''', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Any:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(1_00 ).repartition(1 )
__snake_case = Spark(_lowerCamelCase )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Tuple:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(10 ).repartition(2 )
__snake_case = [1, 0]
__snake_case = _generate_iterable_examples(_lowerCamelCase , _lowerCamelCase ) # Reverse the partitions.
__snake_case = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , _lowerCamelCase )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
__snake_case , __snake_case = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> int:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(10 ).repartition(1 )
__snake_case = SparkExamplesIterable(_lowerCamelCase )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
assert row_id == f'''0_{i}'''
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Union[str, Any]:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('''numpy.random.Generator''' ) as generator_mock:
        generator_mock.shuffle = lambda x: x.reverse()  # reverse in place -> shard order [2, 1, 0]
__snake_case = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , [2, 1, 0] )
__snake_case = SparkExamplesIterable(_lowerCamelCase ).shuffle_data_sources(_lowerCamelCase )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
__snake_case , __snake_case = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Tuple:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
__snake_case = SparkExamplesIterable(_lowerCamelCase ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
__snake_case = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , [0, 2] )
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
__snake_case , __snake_case = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
__snake_case = SparkExamplesIterable(_lowerCamelCase ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
__snake_case = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , [1, 3] )
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
__snake_case , __snake_case = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Optional[int]:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(1_00 ).repartition(1 )
__snake_case = Spark(_lowerCamelCase )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 1_00
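    # With 8-byte rows, a max_shard_size of 1 cannot go below one row per shard,
    # so the repartitioning is capped at the number of rows (100 partitions).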
| 24 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase_ : Optional[int] = logging.get_logger(__name__)
def _lowerCAmelCase (_lowercase ):
"""simple docstring"""
a__ = SwinConfig(
embed_dim=1_92 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=["stage2", "stage3", "stage4"] , )
a__ = DetaConfig(
backbone_config=_lowerCamelCase , num_queries=9_00 , encoder_ffn_dim=20_48 , decoder_ffn_dim=20_48 , num_feature_levels=5 , assign_first_stage=_lowerCamelCase , with_box_refine=_lowerCamelCase , two_stage=_lowerCamelCase , )
# set labels
a__ = "huggingface/label-files"
if "o365" in model_name:
a__ = 3_66
a__ = "object365-id2label.json"
else:
a__ = 91
a__ = "coco-detection-id2label.json"
a__ = num_labels
a__ = json.load(open(cached_download(hf_hub_url(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) ) , "r" ) )
a__ = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
a__ = idalabel
a__ = {v: k for k, v in idalabel.items()}
return config
def _lowerCAmelCase (_lowercase ):
"""simple docstring"""
a__ = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.patch_embed.proj.weight", "model.backbone.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.0.body.patch_embed.proj.bias", "model.backbone.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.0.body.patch_embed.norm.weight", "model.backbone.model.embeddings.norm.weight") )
rename_keys.append(("backbone.0.body.patch_embed.norm.bias", "model.backbone.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm1.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm1.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm2.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm2.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.0.body.layers.{i}.downsample.reduction.weight', F'model.backbone.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.downsample.norm.weight', F'model.backbone.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.downsample.norm.bias', F'model.backbone.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append(("backbone.0.body.norm1.weight", "model.backbone.model.hidden_states_norms.stage2.weight") )
rename_keys.append(("backbone.0.body.norm1.bias", "model.backbone.model.hidden_states_norms.stage2.bias") )
rename_keys.append(("backbone.0.body.norm2.weight", "model.backbone.model.hidden_states_norms.stage3.weight") )
rename_keys.append(("backbone.0.body.norm2.bias", "model.backbone.model.hidden_states_norms.stage3.bias") )
rename_keys.append(("backbone.0.body.norm3.weight", "model.backbone.model.hidden_states_norms.stage4.weight") )
rename_keys.append(("backbone.0.body.norm3.bias", "model.backbone.model.hidden_states_norms.stage4.bias") )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight', F'model.encoder.layers.{i}.self_attn.sampling_offsets.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias', F'model.encoder.layers.{i}.self_attn.sampling_offsets.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.attention_weights.weight', F'model.encoder.layers.{i}.self_attn.attention_weights.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.attention_weights.bias', F'model.encoder.layers.{i}.self_attn.attention_weights.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.value_proj.weight', F'model.encoder.layers.{i}.self_attn.value_proj.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.value_proj.bias', F'model.encoder.layers.{i}.self_attn.value_proj.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.output_proj.weight', F'model.encoder.layers.{i}.self_attn.output_proj.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.output_proj.bias', F'model.encoder.layers.{i}.self_attn.output_proj.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.weight', F'model.encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'model.encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'model.encoder.layers.{i}.fc1.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'model.encoder.layers.{i}.fc1.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'model.encoder.layers.{i}.fc2.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'model.encoder.layers.{i}.fc2.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'model.encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'model.encoder.layers.{i}.final_layer_norm.bias') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight', F'model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias', F'model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.attention_weights.weight', F'model.decoder.layers.{i}.encoder_attn.attention_weights.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.attention_weights.bias', F'model.decoder.layers.{i}.encoder_attn.attention_weights.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.value_proj.weight', F'model.decoder.layers.{i}.encoder_attn.value_proj.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.value_proj.bias', F'model.decoder.layers.{i}.encoder_attn.value_proj.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.output_proj.weight', F'model.decoder.layers.{i}.encoder_attn.output_proj.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.output_proj.bias', F'model.decoder.layers.{i}.encoder_attn.output_proj.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.weight', F'model.decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'model.decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'model.decoder.layers.{i}.self_attn.out_proj.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'model.decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm2.weight', F'model.decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm2.bias', F'model.decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'model.decoder.layers.{i}.fc1.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'model.decoder.layers.{i}.fc1.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'model.decoder.layers.{i}.fc2.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'model.decoder.layers.{i}.fc2.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'model.decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'model.decoder.layers.{i}.final_layer_norm.bias') )
# fmt: on
return rename_keys
def _lowerCAmelCase (_lowercase , _lowercase , _lowercase ):
"""simple docstring"""
a__ = dct.pop(_lowerCamelCase )
a__ = val
def _lowerCAmelCase (_lowercase , _lowercase ):
"""simple docstring"""
a__ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
a__ = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
a__ = state_dict.pop(F'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight' )
a__ = state_dict.pop(F'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
a__ = in_proj_weight[:dim, :]
a__ = in_proj_bias[: dim]
a__ = in_proj_weight[
dim : dim * 2, :
]
a__ = in_proj_bias[
dim : dim * 2
]
a__ = in_proj_weight[
-dim :, :
]
a__ = in_proj_bias[-dim :]
# fmt: on
def _lowerCAmelCase (_lowercase , _lowercase ):
"""simple docstring"""
a__ = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
a__ = state_dict.pop(F'transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
a__ = state_dict.pop(F'transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
a__ = in_proj_weight[:hidden_size, :]
a__ = in_proj_bias[:hidden_size]
a__ = in_proj_weight[
hidden_size : hidden_size * 2, :
]
a__ = in_proj_bias[hidden_size : hidden_size * 2]
a__ = in_proj_weight[-hidden_size:, :]
a__ = in_proj_bias[-hidden_size:]
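# Fused in_proj layout along dim 0: rows [0:h) -> query, [h:2h) -> key, [2h:3h) -> value,
# with h = config.d_model; the slices above split it in exactly that order.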
def _lowerCAmelCase ():
"""simple docstring"""
a__ = "http://images.cocodataset.org/val2017/000000039769.jpg"
a__ = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return im
@torch.no_grad()
def _lowerCAmelCase (_lowercase , _lowercase , _lowercase ):
"""simple docstring"""
a__ = get_deta_config(_lowerCamelCase )
# load original state dict
if model_name == "deta-swin-large":
a__ = hf_hub_download(repo_id="nielsr/deta-checkpoints" , filename="adet_swin_ft.pth" )
elif model_name == "deta-swin-large-o365":
a__ = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365" , filename="deta_swin_pt_o365.pth" )
else:
raise ValueError(F'Model name {model_name} not supported' )
a__ = torch.load(_lowerCamelCase , map_location="cpu" )["model"]
# original state dict
for name, param in state_dict.items():
print(_lowerCamelCase , param.shape )
# rename keys
a__ = create_rename_keys(_lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
read_in_swin_q_k_v(_lowerCamelCase , config.backbone_config )
read_in_decoder_q_k_v(_lowerCamelCase , _lowerCamelCase )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
a__ = state_dict.pop(_lowerCamelCase )
a__ = val
if "input_proj" in key:
a__ = state_dict.pop(_lowerCamelCase )
a__ = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
a__ = state_dict.pop(_lowerCamelCase )
a__ = val
# finally, create HuggingFace model and load state dict
a__ = DetaForObjectDetection(_lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
model.eval()
a__ = "cuda" if torch.cuda.is_available() else "cpu"
model.to(_lowerCamelCase )
# load image processor
a__ = DetaImageProcessor(format="coco_detection" )
# verify our conversion on image
a__ = prepare_img()
a__ = processor(images=_lowerCamelCase , return_tensors="pt" )
a__ = encoding["pixel_values"]
a__ = model(pixel_values.to(_lowerCamelCase ) )
# verify logits
print("Logits:" , outputs.logits[0, :3, :3] )
print("Boxes:" , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
a__ = torch.tensor(
[[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] )
a__ = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] )
elif model_name == "deta-swin-large-o365":
a__ = torch.tensor(
[[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] )
a__ = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(_lowerCamelCase ) , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(_lowerCamelCase ) , atol=1e-4 )
print("Everything ok!" )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(F'Saving PyTorch model and processor to {pytorch_dump_folder_path}...' )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
model.save_pretrained(_lowerCamelCase )
processor.save_pretrained(_lowerCamelCase )
# Push to hub
if push_to_hub:
print("Pushing model and processor to hub..." )
model.push_to_hub(F'jozhang97/{model_name}' )
processor.push_to_hub(F'jozhang97/{model_name}' )
if __name__ == "__main__":
UpperCamelCase_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
type=str,
default="""deta-swin-large""",
choices=["""deta-swin-large""", """deta-swin-large-o365"""],
help="""Name of the model you\'d like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
help="""Path to the folder to output PyTorch model.""",
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
UpperCamelCase_ : Optional[int] = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 331 |
'''simple docstring'''
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    '''simple docstring'''
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for the sum of an arithmetic series: S_n = n/2 * (2a + (n - 1)d)
    return total
def main() -> None:
    '''simple docstring'''
    print(sum_of_series(1, 1, 10))
if __name__ == "__main__":
import doctest
doctest.testmod()
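# Worked check (illustrative): sum_of_series(1, 1, 10) == (10 / 2) * (2 + 9) == 55.0,
# i.e. 1 + 2 + ... + 10.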
| 24 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class lowerCamelCase_ ( unittest.TestCase ):
def __init__( self : Optional[Any] , _A : List[Any] , _A : Tuple=7 , _A : Tuple=3 , _A : Any=18 , _A : Any=30 , _A : Optional[Any]=400 , _A : int=True , _A : List[Any]=None , _A : Tuple=True , _A : List[str]=False , _A : List[str]=True , _A : Tuple=True , _A : Dict=[0.5, 0.5, 0.5] , _A : Optional[Any]=[0.5, 0.5, 0.5] , ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = parent
UpperCAmelCase__ : Union[str, Any] = batch_size
UpperCAmelCase__ : List[Any] = num_channels
UpperCAmelCase__ : Union[str, Any] = image_size
UpperCAmelCase__ : Optional[Any] = min_resolution
UpperCAmelCase__ : List[str] = max_resolution
UpperCAmelCase__ : List[str] = do_resize
UpperCAmelCase__ : Union[str, Any] = size if size is not None else {'''height''': 18, '''width''': 20}
UpperCAmelCase__ : Dict = do_thumbnail
UpperCAmelCase__ : int = do_align_axis
UpperCAmelCase__ : Dict = do_pad
UpperCAmelCase__ : List[Any] = do_normalize
UpperCAmelCase__ : List[Any] = image_mean
UpperCAmelCase__ : List[str] = image_std
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class lowerCamelCase_ ( __lowerCAmelCase , unittest.TestCase ):
lowerCAmelCase__ = DonutImageProcessor if is_vision_available() else None
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = DonutImageProcessingTester(self )
@property
def lowercase_ ( self : int ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_resize''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''size''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_thumbnail''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_align_long_axis''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_pad''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_normalize''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''image_mean''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''image_std''' ) )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 20} )
UpperCAmelCase__ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
# Previous config had dimensions in (width, height) order
UpperCAmelCase__ : str = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'''height''': 84, '''width''': 42} )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
pass
@is_flaky()
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
UpperCAmelCase__ : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
UpperCAmelCase__ : str = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
UpperCAmelCase__ : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
UpperCAmelCase__ : Optional[Any] = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
UpperCAmelCase__ : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
UpperCAmelCase__ : Union[str, Any] = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
| 75 |
'''simple docstring'''
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def _UpperCamelCase (_lowerCamelCase : List[Any] , _lowerCamelCase : Tuple=False )-> Union[str, Any]:
'''simple docstring'''
try:
__snake_case = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
__snake_case = default
else:
# KEY is set, convert it to True or False.
try:
__snake_case = strtobool(_lowerCamelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'''If set, {key} must be yes or no.''' )
return _value
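# Example: RUN_SLOW=yes (or "true", "1", ...) enables the slow tests below;
# strtobool maps the usual yes/no spellings to 1/0.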
UpperCAmelCase_ : Optional[Any] = parse_flag_from_env('''RUN_SLOW''', default=False)
UpperCAmelCase_ : Union[str, Any] = parse_flag_from_env('''RUN_REMOTE''', default=False)
UpperCAmelCase_ : Optional[Any] = parse_flag_from_env('''RUN_LOCAL''', default=True)
UpperCAmelCase_ : Union[str, Any] = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
UpperCAmelCase_ : Dict = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
UpperCAmelCase_ : int = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
UpperCAmelCase_ : Tuple = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
UpperCAmelCase_ : str = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
UpperCAmelCase_ : Tuple = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
UpperCAmelCase_ : Union[str, Any] = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('''0.3.2'''),
reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
UpperCAmelCase_ : int = pytest.mark.skipif(
sys.platform == '''win32''',
reason='''test should not be run on Windows''',
)
def _UpperCamelCase (_lowerCamelCase : List[str] )-> List[Any]:
'''simple docstring'''
try:
import faiss # noqa
except ImportError:
__snake_case = unittest.skip('''test requires faiss''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] )-> List[str]:
'''simple docstring'''
try:
import regex # noqa
except ImportError:
__snake_case = unittest.skip('''test requires regex''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] )-> Union[str, Any]:
'''simple docstring'''
try:
import elasticsearch # noqa
except ImportError:
__snake_case = unittest.skip('''test requires elasticsearch''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Dict )-> List[Any]:
'''simple docstring'''
try:
import sqlalchemy # noqa
except ImportError:
__snake_case = unittest.skip('''test requires sqlalchemy''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : List[str] )-> List[str]:
'''simple docstring'''
if not config.TORCH_AVAILABLE:
__snake_case = unittest.skip('''test requires PyTorch''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Optional[Any] )-> Dict:
'''simple docstring'''
if not config.TF_AVAILABLE:
__snake_case = unittest.skip('''test requires TensorFlow''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : int )-> Any:
'''simple docstring'''
if not config.JAX_AVAILABLE:
__snake_case = unittest.skip('''test requires JAX''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Tuple )-> str:
'''simple docstring'''
if not config.PIL_AVAILABLE:
__snake_case = unittest.skip('''test requires Pillow''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Dict )-> Any:
'''simple docstring'''
try:
import transformers # noqa F401
except ImportError:
return unittest.skip('''test requires transformers''' )(_lowerCamelCase )
else:
return test_case
def _UpperCamelCase (_lowerCamelCase : int )-> Tuple:
'''simple docstring'''
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip('''test requires tiktoken''' )(_lowerCamelCase )
else:
return test_case
def _UpperCamelCase (_lowerCamelCase : Dict )-> str:
'''simple docstring'''
try:
import spacy # noqa F401
except ImportError:
return unittest.skip('''test requires spacy''' )(_lowerCamelCase )
else:
return test_case
def _UpperCamelCase (_lowerCamelCase : int )-> Dict:
'''simple docstring'''
def _require_spacy_model(_lowerCamelCase : int ):
try:
import spacy # noqa F401
spacy.load(_lowerCamelCase )
except ImportError:
return unittest.skip('''test requires spacy''' )(_lowerCamelCase )
except OSError:
return unittest.skip('''test requires spacy model \'{}\''''.format(_lowerCamelCase ) )(_lowerCamelCase )
else:
return test_case
return _require_spacy_model
def _UpperCamelCase (_lowerCamelCase : str )-> Dict:
'''simple docstring'''
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip('''test requires pyspark''' )(_lowerCamelCase )
else:
return test_case
def _UpperCamelCase (_lowerCamelCase : Tuple )-> str:
'''simple docstring'''
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip('''test requires joblibspark''' )(_lowerCamelCase )
else:
return test_case
def _UpperCamelCase (_lowerCamelCase : Any )-> int:
'''simple docstring'''
if not _run_slow_tests or _run_slow_tests == 0:
__snake_case = unittest.skip('''test is slow''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Any )-> Optional[Any]:
'''simple docstring'''
if not _run_local_tests or _run_local_tests == 0:
__snake_case = unittest.skip('''test is local''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : str )-> int:
'''simple docstring'''
if not _run_packaged_tests or _run_packaged_tests == 0:
__snake_case = unittest.skip('''test is packaged''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (_lowerCamelCase : Optional[int] )-> str:
'''simple docstring'''
if not _run_remote_tests or _run_remote_tests == 0:
__snake_case = unittest.skip('''test requires remote''' )(_lowerCamelCase )
return test_case
def _UpperCamelCase (*_lowerCamelCase : str )-> Optional[int]:
'''simple docstring'''
def decorate(cls : Optional[Any] ):
for name, fn in cls.__dict__.items():
if callable(_lowerCamelCase ) and name.startswith('''test''' ):
for decorator in decorators:
__snake_case = decorator(_lowerCamelCase )
setattr(cls , _lowerCamelCase , _lowerCamelCase )
return cls
return decorate
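# Applies every decorator in `decorators` to each method whose name starts with "test",
# e.g. to attach one of the require_* skip helpers above to a whole TestCase at once.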
class lowerCAmelCase ( __lowerCAmelCase):
pass
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : List[str] = 0
__lowercase : Dict = 1
__lowercase : List[Any] = 2
@contextmanager
def _UpperCamelCase (_lowerCamelCase : Dict=OfflineSimulationMode.CONNECTION_FAILS , _lowerCamelCase : Optional[int]=1E-16 )-> Tuple:
'''simple docstring'''
__snake_case = requests.Session().request
def timeout_request(_lowerCamelCase : Any , _lowerCamelCase : str , _lowerCamelCase : str , **_lowerCamelCase : Any ):
# Change the url to an invalid url so that the connection hangs
__snake_case = '''https://10.255.255.1'''
if kwargs.get('''timeout''' ) is None:
raise RequestWouldHangIndefinitelyError(
f'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.''' )
__snake_case = timeout
try:
return online_request(_lowerCamelCase , _lowerCamelCase , **_lowerCamelCase )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
__snake_case = url
__snake_case = e.args[0]
__snake_case = (max_retry_error.args[0].replace('''10.255.255.1''' , f'''OfflineMock[{url}]''' ),)
__snake_case = (max_retry_error,)
raise
def raise_connection_error(_lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[int] , **_lowerCamelCase : Dict ):
raise requests.ConnectionError('''Offline mode is enabled.''' , request=_lowerCamelCase )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch('''requests.Session.send''' , _lowerCamelCase ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch('''requests.Session.request''' , _lowerCamelCase ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowerCamelCase ):
yield
else:
raise ValueError('''Please use a value from the OfflineSimulationMode enum.''' )
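# CONNECTION_FAILS raises ConnectionError immediately; CONNECTION_TIMES_OUT points requests
# at the non-routable 10.255.255.1 so calls hang until their (mandatory) timeout;
# HF_DATASETS_OFFLINE_SET_TO_1 flips the datasets offline config flag instead.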
@contextmanager
def _UpperCamelCase (*_lowerCamelCase : Union[str, Any] , **_lowerCamelCase : List[str] )-> Any:
'''simple docstring'''
__snake_case = str(Path().resolve() )
with tempfile.TemporaryDirectory(*_lowerCamelCase , **_lowerCamelCase ) as tmp_dir:
try:
os.chdir(_lowerCamelCase )
yield
finally:
os.chdir(_lowerCamelCase )
@contextmanager
def _UpperCamelCase ()-> Optional[int]:
'''simple docstring'''
import gc
gc.collect()
__snake_case = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def _UpperCamelCase ()-> List[Any]:
'''simple docstring'''
import gc
gc.collect()
__snake_case = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def _UpperCamelCase (_lowerCamelCase : Any , _lowerCamelCase : int )-> Any:
'''simple docstring'''
return deepcopy(_lowerCamelCase ).integers(0 , 1_00 , 10 ).tolist() == deepcopy(_lowerCamelCase ).integers(0 , 1_00 , 10 ).tolist()
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] )-> List[Any]:
'''simple docstring'''
import decorator
from requests.exceptions import HTTPError
def _wrapper(_lowerCamelCase : int , *_lowerCamelCase : int , **_lowerCamelCase : Optional[int] ):
try:
return func(*_lowerCamelCase , **_lowerCamelCase )
except HTTPError as err:
if str(_lowerCamelCase ).startswith('''500''' ) or str(_lowerCamelCase ).startswith('''502''' ):
pytest.xfail(str(_lowerCamelCase ) )
raise err
return decorator.decorator(_wrapper , _lowerCamelCase )
class lowerCAmelCase :
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
__snake_case = returncode
__snake_case = stdout
__snake_case = stderr
async def _UpperCamelCase (_lowerCamelCase : List[str] , _lowerCamelCase : Union[str, Any] )-> Dict:
'''simple docstring'''
while True:
__snake_case = await stream.readline()
if line:
callback(_lowerCamelCase )
else:
break
async def _UpperCamelCase (_lowerCamelCase : Tuple , _lowerCamelCase : Optional[int]=None , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : Tuple=None , _lowerCamelCase : Dict=False , _lowerCamelCase : List[Any]=False )-> _RunOutput:
'''simple docstring'''
if echo:
print('''\nRunning: ''' , ''' '''.join(_lowerCamelCase ) )
__snake_case = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=_lowerCamelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_lowerCamelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
__snake_case = []
__snake_case = []
def tee(_lowerCamelCase : int , _lowerCamelCase : List[Any] , _lowerCamelCase : str , _lowerCamelCase : Dict="" ):
__snake_case = line.decode('''utf-8''' ).rstrip()
sink.append(_lowerCamelCase )
if not quiet:
print(_lowerCamelCase , _lowerCamelCase , file=_lowerCamelCase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda _lowerCamelCase : tee(_lowerCamelCase , _lowerCamelCase , sys.stdout , label='''stdout:''' ) ),
_read_stream(p.stderr , lambda _lowerCamelCase : tee(_lowerCamelCase , _lowerCamelCase , sys.stderr , label='''stderr:''' ) ),
] , timeout=_lowerCamelCase , )
return _RunOutput(await p.wait() , _lowerCamelCase , _lowerCamelCase )
def _UpperCamelCase (_lowerCamelCase : Optional[Any] , _lowerCamelCase : Any=None , _lowerCamelCase : List[str]=None , _lowerCamelCase : Optional[Any]=1_80 , _lowerCamelCase : Dict=False , _lowerCamelCase : int=True )-> _RunOutput:
'''simple docstring'''
__snake_case = asyncio.get_event_loop()
__snake_case = loop.run_until_complete(
_stream_subprocess(_lowerCamelCase , env=_lowerCamelCase , stdin=_lowerCamelCase , timeout=_lowerCamelCase , quiet=_lowerCamelCase , echo=_lowerCamelCase ) )
__snake_case = ''' '''.join(_lowerCamelCase )
if result.returncode > 0:
__snake_case = '''\n'''.join(result.stderr )
raise RuntimeError(
f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
f'''The combined stderr from workers follows:\n{stderr}''' )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f'''\'{cmd_str}\' produced no output.''' )
return result
def _UpperCamelCase ()-> Dict:
'''simple docstring'''
__snake_case = os.environ.get('''PYTEST_XDIST_WORKER''' , '''gw0''' )
__snake_case = re.sub(R'''^gw''' , '''''' , _lowerCamelCase , 0 , re.M )
return int(_lowerCamelCase )
def _UpperCamelCase ()-> Tuple:
'''simple docstring'''
__snake_case = 2_95_00
__snake_case = pytest_xdist_worker_id()
return port + uniq_delta
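# Each pytest-xdist worker ("gw0", "gw1", ...) therefore gets its own port: 29500 + worker index.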
| 24 | 0 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[int]=2 , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Any=10 , UpperCAmelCase_ : List[Any]=3 , UpperCAmelCase_ : List[str]=32 * 4 , UpperCAmelCase_ : Optional[int]=32 * 6 , UpperCAmelCase_ : int=4 , UpperCAmelCase_ : Any=32 , ):
SCREAMING_SNAKE_CASE : Union[str, Any] = parent
SCREAMING_SNAKE_CASE : int = batch_size
SCREAMING_SNAKE_CASE : Tuple = is_training
SCREAMING_SNAKE_CASE : Union[str, Any] = use_auxiliary_loss
SCREAMING_SNAKE_CASE : List[Any] = num_queries
SCREAMING_SNAKE_CASE : int = num_channels
SCREAMING_SNAKE_CASE : Tuple = min_size
SCREAMING_SNAKE_CASE : List[Any] = max_size
SCREAMING_SNAKE_CASE : Tuple = num_labels
SCREAMING_SNAKE_CASE : List[Any] = mask_feature_size
def _A ( self : Dict ):
SCREAMING_SNAKE_CASE : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : int = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Union[str, Any] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__SCREAMING_SNAKE_CASE ) > 0.5
).float()
SCREAMING_SNAKE_CASE : Optional[Any] = (torch.rand((self.batch_size, self.num_labels) , device=__SCREAMING_SNAKE_CASE ) > 0.5).long()
SCREAMING_SNAKE_CASE : List[str] = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def _A ( self : Dict ):
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def _A ( self : Optional[int] ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE : str = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def _A ( self : List[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Union[str, Any] ):
SCREAMING_SNAKE_CASE : Dict = output.encoder_hidden_states
SCREAMING_SNAKE_CASE : str = output.pixel_decoder_hidden_states
SCREAMING_SNAKE_CASE : str = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(__SCREAMING_SNAKE_CASE ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__SCREAMING_SNAKE_CASE ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__SCREAMING_SNAKE_CASE ) , config.decoder_config.decoder_layers )
def _A ( self : Any , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int=False ):
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = MaskFormerModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
SCREAMING_SNAKE_CASE : List[Any] = model(pixel_values=__SCREAMING_SNAKE_CASE , pixel_mask=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : List[str] = model(__SCREAMING_SNAKE_CASE , output_hidden_states=__SCREAMING_SNAKE_CASE )
# the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _A ( self : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
SCREAMING_SNAKE_CASE : List[str] = MaskFormerForInstanceSegmentation(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
def comm_check_on_output(UpperCAmelCase_ : Tuple ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

            comm_check_on_output(result)
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
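    # Added note (hypothetical numbers): the "+ 1" checked above is the extra
    # "no object" null class that the matching-based loss can assign a query to,
    # so with num_labels=4 and num_queries=10 the class logits would come out as
    # (batch_size, 10, 5), while the mask logits stay (batch_size, 10, H // 4, W // 4).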
@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    def setUp(self):
        self.model_tester = MaskFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_maskformer_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs_dict, output_hidden_states=False)
    def test_maskformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs)
@unittest.skip(reason="MaskFormer does not use inputs_embeds" )
    def test_inputs_embeds(self):
pass
@unittest.skip(reason="MaskFormer does not have a get_input_embeddings method" )
    def test_model_common_attributes(self):
pass
@unittest.skip(reason="MaskFormer is not a generative model" )
    def test_generate_without_input_ids(self):
pass
@unittest.skip(reason="MaskFormer does not use token embeddings" )
    def test_resize_tokens_embeddings(self):
pass
@require_torch_multi_gpu
@unittest.skip(
reason="MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`" )
    def test_multi_gpu_data_parallel_forward(self):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def test_model_outputs_equivalence(self):
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            model = MaskFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_maskformer_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }

        model = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)
    def test_output_hidden_state(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs_dict, output_hidden_states=True)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()
    def test_retain_grad_hidden_states_attentions(self):
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config)
        model.to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
snake_case = 1e-4
def prepare_img():
    """Load the COCO fixture image used by the integration tests below."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
    @cached_property
    def default_image_processor(self):
return (
MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco" )
if is_vision_available()
else None
)
    def test_inference_no_head(self):
SCREAMING_SNAKE_CASE : List[str] = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco" ).to(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Optional[Any] = self.default_image_processor
SCREAMING_SNAKE_CASE : Tuple = prepare_img()
SCREAMING_SNAKE_CASE : Dict = image_processor(__SCREAMING_SNAKE_CASE , return_tensors="pt" ).to(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Union[str, Any] = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__SCREAMING_SNAKE_CASE , (1, 3, 800, 1088) )
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[Any] = model(**__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(
[[-0.0_482, 0.9_228, 0.4_951], [-0.2_547, 0.8_017, 0.8_527], [-0.0_069, 0.3_385, -0.0_089]] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=__SCREAMING_SNAKE_CASE ) )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(
[[-0.8_422, -0.8_434, -0.9_718], [-1.0_144, -0.5_565, -0.4_195], [-1.0_038, -0.4_484, -0.1_961]] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=__SCREAMING_SNAKE_CASE ) )
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(
[[0.2_852, -0.0_159, 0.9_735], [0.6_254, 0.1_858, 0.8_529], [-0.0_680, -0.4_116, 1.8_413]] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=__SCREAMING_SNAKE_CASE ) )
    def test_inference_instance_segmentation_head(self):
SCREAMING_SNAKE_CASE : int = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
.to(__SCREAMING_SNAKE_CASE )
.eval()
)
SCREAMING_SNAKE_CASE : Optional[Any] = self.default_image_processor
SCREAMING_SNAKE_CASE : List[str] = prepare_img()
SCREAMING_SNAKE_CASE : Any = image_processor(__SCREAMING_SNAKE_CASE , return_tensors="pt" ).to(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : List[Any] = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__SCREAMING_SNAKE_CASE , (1, 3, 800, 1088) )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Dict = model(**__SCREAMING_SNAKE_CASE )
# masks_queries_logits
SCREAMING_SNAKE_CASE : str = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
SCREAMING_SNAKE_CASE : Optional[int] = [
[-1.3_737_124, -1.7_724_937, -1.9_364_233],
[-1.5_977_281, -1.9_867_939, -2.1_523_695],
[-1.5_795_398, -1.9_269_832, -2.093_942],
]
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(__SCREAMING_SNAKE_CASE ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=__SCREAMING_SNAKE_CASE ) )
# class_queries_logits
SCREAMING_SNAKE_CASE : Union[str, Any] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
SCREAMING_SNAKE_CASE : str = torch.tensor(
[
[1.6_512E00, -5.2_572E00, -3.3_519E00],
[3.6_169E-02, -5.9_025E00, -2.9_313E00],
[1.0_766E-04, -7.7_630E00, -5.1_263E00],
] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=__SCREAMING_SNAKE_CASE ) )
    def test_inference_instance_segmentation_head_resnet_backbone(self):
SCREAMING_SNAKE_CASE : Tuple = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff" )
.to(__SCREAMING_SNAKE_CASE )
.eval()
)
SCREAMING_SNAKE_CASE : Optional[Any] = self.default_image_processor
SCREAMING_SNAKE_CASE : str = prepare_img()
SCREAMING_SNAKE_CASE : int = image_processor(__SCREAMING_SNAKE_CASE , return_tensors="pt" ).to(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : List[Any] = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__SCREAMING_SNAKE_CASE , (1, 3, 800, 1088) )
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[Any] = model(**__SCREAMING_SNAKE_CASE )
# masks_queries_logits
SCREAMING_SNAKE_CASE : List[str] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
SCREAMING_SNAKE_CASE : str = [[-0.9_046, -2.6_366, -4.6_062], [-3.4_179, -5.7_890, -8.8_057], [-4.9_179, -7.6_560, -10.7_711]]
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(__SCREAMING_SNAKE_CASE ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=__SCREAMING_SNAKE_CASE ) )
# class_queries_logits
SCREAMING_SNAKE_CASE : Tuple = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
SCREAMING_SNAKE_CASE : Any = torch.tensor(
[[4.7_188, -3.2_585, -2.8_857], [6.6_871, -2.9_181, -1.2_487], [7.2_449, -2.2_764, -2.1_874]] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=__SCREAMING_SNAKE_CASE ) )
    def test_with_segmentation_maps_and_loss(self):
SCREAMING_SNAKE_CASE : Union[str, Any] = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
.to(__SCREAMING_SNAKE_CASE )
.eval()
)
SCREAMING_SNAKE_CASE : List[Any] = self.default_image_processor
SCREAMING_SNAKE_CASE : str = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="pt" , )
SCREAMING_SNAKE_CASE : int = inputs["pixel_values"].to(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Any = [el.to(__SCREAMING_SNAKE_CASE ) for el in inputs["mask_labels"]]
SCREAMING_SNAKE_CASE : int = [el.to(__SCREAMING_SNAKE_CASE ) for el in inputs["class_labels"]]
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[int] = model(**__SCREAMING_SNAKE_CASE )
self.assertTrue(outputs.loss is not None )
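        # Usage sketch beyond the loss check (added illustration; the call below is the
        # image processor's standard post-processing entry point, and (384, 384) matches
        # the fixture size used above):
        #     semantic_map = self.default_image_processor.post_process_semantic_segmentation(
        #         outputs, target_sizes=[(384, 384)]
        #     )[0]  # per-pixel class ids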
| 62 |
'''simple docstring'''
def partition(m: int) -> int:
    """Count the integer partitions of m via dynamic programming.

    memo[n][k] counts the partitions of n whose parts are at most k + 1.
    """
    memo = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]
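# Independent cross-check (added for illustration; a brute-force recursion, not part
# of the original module): count partitions of n whose largest part is at most
# max_part, so _naive_partition(m, m) must agree with partition(m).
def _naive_partition(n: int, max_part: int) -> int:
    if n == 0:
        return 1
    return sum(_naive_partition(n - k, k) for k in range(1, min(n, max_part) + 1))


# sanity check: partition(5) == _naive_partition(5, 5) == 7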
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
            n = int(input('''Enter a number: ''').strip())
print(partition(n))
except ValueError:
print('''Please enter a number.''')
else:
try:
            n = int(sys.argv[1])
print(partition(n))
except ValueError:
print('''Please pass a number.''')
| 24 | 0 |
def sum_of_digits(n: int) -> int:
    """Return the sum of the digits of n (iterative).

    >>> sum_of_digits(12345)
    15
    """
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Return the sum of the digits of n (recursive)."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """Return the sum of the digits of n (via string conversion)."""
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Benchmark the three implementations on ints of increasing length."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (26_2144, 1125_8999_0684_2624, 126_7650_6002_2822_9401_4967_0320_5376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 36 |
'''simple docstring'''
import argparse
import os
import re
PATH_TO_AUTO_MODULE = "src/transformers/models/auto"


# re pattern that matches mapping introductions:
#    SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')
def sort_auto_mapping(fname: str, overwrite: bool = False):
    """Sort the mappings of one auto-module file; return True if the file needs sorting."""
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda block: _re_identifier.search(block).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True
def sort_all_auto_mappings(overwrite: bool = False):
    """Sort every auto-module file; raise if any needs sorting and overwrite is False."""
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
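    # Effect illustration (added note, hypothetical entries): within each OrderedDict
    # the sorter reorders items alphabetically by their quoted identifier, e.g.
    #     ("bert", "BertConfig"),      becomes      ("albert", "AlbertConfig"),
    #     ("albert", "AlbertConfig"),               ("bert", "BertConfig"),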
| 24 | 0 |
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class UtilsFunctionsTest(unittest.TestCase):
"""simple docstring"""
    def test_top_k_top_p_filtering(self):
lowercase_ = tf.convert_to_tensor(
[
[
8.2_22_09_91, # 3rd highest value; idx. 0
-0.5_62_00_44,
5.23_22_97_52,
4.0_38_63_93,
-6.8_79_83_78,
-0.54_78_58_02,
-3.2_01_21_53,
2.92_77_71_76,
1.88_17_19_53,
7.35_34_12_76, # 5th highest value; idx. 9
8.43_20_78_33, # 2nd highest value; idx. 10
-9.85_71_18_36,
-5.96_20_92_36,
-1.13_03_91_61,
-7.1_11_52_94,
-0.8_36_96_33,
-5.3_18_64_08,
7.06_42_74_07,
0.81_36_93_44,
-0.82_02_38_17,
-5.9_17_97_96,
0.58_81_34_43,
-6.99_77_84_38,
4.71_55_11_89,
-0.18_77_16_37,
7.44_02_07_59, # 4th highest value; idx. 25
9.38_45_09_87, # 1st highest value; idx. 26
2.12_66_29_41,
-9.32_56_20_38,
2.35_65_25_22,
                ],  # cumulative prob of 5 highest values <= 0.6
[
0.58_42_55_18,
4.53_13_92_38,
-5.57_51_04_64,
-6.28_03_06_99,
-7.19_52_95_03,
-4.02_12_25_51,
1.39_33_70_37,
-6.06_70_70_57,
1.59_48_05_17,
-9.64_31_19,
0.03_90_77_99,
0.67_23_17_62,
-8.88_20_67_26,
6.27_11_59_22, # 4th highest value; idx. 13
2.28_52_07_23,
4.82_76_75_06,
4.30_42_13_68,
8.8_27_53_13, # 2nd highest value; idx. 17
5.44_02_99_58, # 5th highest value; idx. 18
-4.4_73_57_94,
7.38_57_95_36, # 3rd highest value; idx. 20
-2.91_05_16_63,
2.61_94_60_77,
-2.5_67_47_62,
-9.48_95_93_02,
-4.02_92_26_45,
-1.35_41_69_18,
9.67_70_23_23, # 1st highest value; idx. 27
-5.89_47_85_53,
1.85_37_04_67,
                ],  # cumulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
lowercase_ = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 1_0], [0, 2_5], [0, 2_6], [1, 1_3], [1, 1_7], [1, 1_8], [1, 2_0], [1, 2_7]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
lowercase_ = tf.convert_to_tensor(
[8.22_20_99, 7.3_53_41_26, 8.43_20_78, 7.4_40_20_75, 9.3_84_51, 6.27_11_59, 8.82_75_31, 5.4_40_29_95, 7.3_85_79_56, 9.67_70_23] , dtype=tf.floataa , ) # expected non filtered values as noted above
lowercase_ = tf_top_k_top_p_filtering(__SCREAMING_SNAKE_CASE , top_k=1_0 , top_p=0.6 , min_tokens_to_keep=4 )
lowercase_ = output[output != -float('''inf''' )]
lowercase_ = tf.cast(
tf.where(tf.not_equal(__SCREAMING_SNAKE_CASE , tf.constant(-float('''inf''' ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , rtol=1e-12 )
tf.debugging.assert_equal(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
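    # For reference (added note, not part of the assertions above): top-k keeps the
    # k largest logits and top-p keeps the smallest set whose softmax mass reaches p;
    # everything else is set to -inf, which is exactly what the expected index/value
    # tensors above encode. A minimal NumPy-style sketch of the top-k half:
    #     threshold = np.sort(logits)[-k]
    #     filtered = np.where(logits >= threshold, logits, -np.inf)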
@require_tf
class TFGenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTestsMixin):
"""simple docstring"""
if is_tf_available():
        framework_dependent_parameters = {
'''AutoModelForCausalLM''': TFAutoModelForCausalLM,
'''AutoModelForSpeechSeq2Seq''': TFAutoModelForSpeechSeqaSeq,
'''AutoModelForSeq2SeqLM''': TFAutoModelForSeqaSeqLM,
'''AutoModelForVision2Seq''': TFAutoModelForVisionaSeq,
'''LogitsProcessorList''': TFLogitsProcessorList,
'''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor,
'''create_tensor_fn''': tf.convert_to_tensor,
'''floats_tensor''': floats_tensor,
'''return_tensors''': '''tf''',
}
@slow
def _lowercase ( self : Tuple ) -> Union[str, Any]:
lowercase_ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowercase_ = 2
lowercase_ = 2
        class DummyModel(tf.Module):
"""simple docstring"""
def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : str ) -> Any:
super(__SCREAMING_SNAKE_CASE , self ).__init__()
lowercase_ = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name='''input_ids''' ),
tf.TensorSpec((None, input_length) , tf.intaa , name='''attention_mask''' ),
) , jit_compile=__SCREAMING_SNAKE_CASE , )
def _lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] ) -> Tuple:
lowercase_ = self.model.generate(
input_ids=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , max_new_tokens=__SCREAMING_SNAKE_CASE , return_dict_in_generate=__SCREAMING_SNAKE_CASE , )
return {"sequences": outputs["sequences"]}
lowercase_ = [[2, 0], [1_0_2, 1_0_3]]
lowercase_ = [[1, 0], [1, 1]]
lowercase_ = DummyModel(model=__SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , signatures={'''serving_default''': dummy_model.serving} )
lowercase_ = tf.saved_model.load(__SCREAMING_SNAKE_CASE ).signatures['''serving_default''']
for batch_size in range(1 , len(__SCREAMING_SNAKE_CASE ) + 1 ):
lowercase_ = {
'''input_ids''': tf.constant(dummy_input_ids[:batch_size] ),
'''attention_mask''': tf.constant(dummy_attention_masks[:batch_size] ),
}
lowercase_ = serving_func(**__SCREAMING_SNAKE_CASE )['''sequences''']
lowercase_ = test_model.generate(**__SCREAMING_SNAKE_CASE , max_new_tokens=__SCREAMING_SNAKE_CASE )
tf.debugging.assert_equal(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@slow
def _lowercase ( self : Dict ) -> int:
lowercase_ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowercase_ = 1
lowercase_ = 2
        class DummyModel(tf.Module):
"""simple docstring"""
def __init__( self : int , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> str:
super(__SCREAMING_SNAKE_CASE , self ).__init__()
lowercase_ = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name='''input_ids''' ),
tf.TensorSpec((batch_size, None) , tf.intaa , name='''attention_mask''' ),
) , jit_compile=__SCREAMING_SNAKE_CASE , )
def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[str] ) -> int:
lowercase_ = self.model.generate(
input_ids=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , max_new_tokens=__SCREAMING_SNAKE_CASE , return_dict_in_generate=__SCREAMING_SNAKE_CASE , )
return {"sequences": outputs["sequences"]}
lowercase_ = [[2], [1_0_2, 1_0_3]]
lowercase_ = [[1], [1, 1]]
lowercase_ = DummyModel(model=__SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , signatures={'''serving_default''': dummy_model.serving} )
lowercase_ = tf.saved_model.load(__SCREAMING_SNAKE_CASE ).signatures['''serving_default''']
for input_row in range(len(__SCREAMING_SNAKE_CASE ) ):
lowercase_ = {
'''input_ids''': tf.constant([dummy_input_ids[input_row]] ),
'''attention_mask''': tf.constant([dummy_attention_masks[input_row]] ),
}
lowercase_ = serving_func(**__SCREAMING_SNAKE_CASE )['''sequences''']
lowercase_ = test_model.generate(**__SCREAMING_SNAKE_CASE , max_new_tokens=__SCREAMING_SNAKE_CASE )
tf.debugging.assert_equal(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
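    # Pattern recap (added note): both export tests wrap `generate` in a tf.function
    # whose input signature leaves one axis as None -- the sequence axis in this test,
    # the batch axis in the previous one -- so a single SavedModel serving signature
    # handles inputs of that varying dimension without retracing.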
@slow
@require_tensorflow_text
def _lowercase ( self : str ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id='''google/flan-t5-small''' , filename='''spiece.model''' , local_dir=__SCREAMING_SNAKE_CASE )
            class CompleteSentenceTransformer(tf.keras.layers.Layer):
"""simple docstring"""
def __init__( self : int ) -> Optional[Any]:
super().__init__()
lowercase_ = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(__SCREAMING_SNAKE_CASE , '''spiece.model''' ) , '''rb''' ).read() )
lowercase_ = TFAutoModelForSeqaSeqLM.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : Optional[int] , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : Any ) -> str:
lowercase_ = self.tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
lowercase_ , lowercase_ = text.pad_model_inputs(
__SCREAMING_SNAKE_CASE , max_seq_length=6_4 , pad_value=self.model.config.pad_token_id )
lowercase_ = self.model.generate(input_ids=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )
return self.tokenizer.detokenize(__SCREAMING_SNAKE_CASE )
lowercase_ = CompleteSentenceTransformer()
lowercase_ = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='''inputs''' )
lowercase_ = complete_model(__SCREAMING_SNAKE_CASE )
lowercase_ = tf.keras.Model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
keras_model.save(__SCREAMING_SNAKE_CASE )
def _lowercase ( self : Union[str, Any] ) -> Any:
lowercase_ = {
'''do_sample''': True,
'''num_beams''': 1,
'''top_p''': 0.7,
'''top_k''': 1_0,
'''temperature''': 0.7,
}
lowercase_ = 1_4
lowercase_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowercase_ = '''Hello, my dog is cute and'''
lowercase_ = tokenizer(__SCREAMING_SNAKE_CASE , return_tensors='''tf''' )
lowercase_ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowercase_ = 6_3_8
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(''':/CPU:0''' ):
tf.random.set_seed(0 )
lowercase_ = model.generate(**__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
self.assertTrue(expectation == len(generated_tokens[0] ) )
lowercase_ = [6_3_8, 1_9_8]
with tf.device(''':/CPU:0''' ):
tf.random.set_seed(0 )
lowercase_ = model.generate(**__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def _lowercase ( self : Dict ) -> List[str]:
lowercase_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
lowercase_ = '''Hugging Face is a technology company based in New York and Paris.'''
lowercase_ = bart_tokenizer(__SCREAMING_SNAKE_CASE , return_tensors='''tf''' ).input_ids
lowercase_ = TFBartForConditionalGeneration.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
lowercase_ = bart_model.generate(__SCREAMING_SNAKE_CASE ).numpy()
        class FakeBart(TFBartForConditionalGeneration):
"""simple docstring"""
def _lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : int=None , **SCREAMING_SNAKE_CASE_ : Any ) -> List[Any]:
return super().call(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
lowercase_ = FakeBart.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
lowercase_ = bart_model.generate(__SCREAMING_SNAKE_CASE , foo='''bar''' ).numpy()
self.assertTrue(np.array_equal(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
        class FakeEncoder(bart_model.model.encoder.__class__):
"""simple docstring"""
def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> int:
return super().call(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
lowercase_ = FakeEncoder(bart_model.config , bart_model.model.shared )
lowercase_ = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
lowercase_ = bart_model.generate(__SCREAMING_SNAKE_CASE ).numpy()
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(__SCREAMING_SNAKE_CASE , foo='''bar''' )
| 97 |
'''simple docstring'''
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn: bool = True, stacklevel: int = 2):
    """Emit a FutureWarning for deprecated arguments/attributes and return their values, if any."""
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
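# Usage sketch (added illustration; `set_timesteps` / `offset` / `steps_offset` are
# hypothetical names, not taken from this module): a caller pops a deprecated kwarg
# while emitting the FutureWarning that `deprecate` builds above.
#
#     def set_timesteps(self, num_inference_steps, **kwargs):
#         offset = deprecate("offset", "0.15.0", "Use `steps_offset` instead.", take_from=kwargs)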
| 24 | 0 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''allenai/led-base-16384''': 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """Map every possible byte to a printable unicode character for reversible BPE."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
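# Quick illustration (added note): printable bytes map to themselves while the rest
# are shifted into an unused unicode range, e.g. bytes_to_unicode()[ord("A")] == "A"
# and bytes_to_unicode()[0] == chr(256) == "Ā".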
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols).

    e.g. get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LEDTokenizer(PreTrainedTokenizer):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self, snake_case__, snake_case__, snake_case__="replace", snake_case__="<s>", snake_case__="</s>", snake_case__="</s>", snake_case__="<s>", snake_case__="<unk>", snake_case__="<pad>", snake_case__="<mask>", snake_case__=False, **snake_case__, ) -> List[str]:
"""simple docstring"""
lowercase_ : int = AddedToken(__SCREAMING_SNAKE_CASE, lstrip=__SCREAMING_SNAKE_CASE, rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE ) else bos_token
lowercase_ : int = AddedToken(__SCREAMING_SNAKE_CASE, lstrip=__SCREAMING_SNAKE_CASE, rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE ) else eos_token
lowercase_ : Union[str, Any] = AddedToken(__SCREAMING_SNAKE_CASE, lstrip=__SCREAMING_SNAKE_CASE, rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE ) else sep_token
lowercase_ : int = AddedToken(__SCREAMING_SNAKE_CASE, lstrip=__SCREAMING_SNAKE_CASE, rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE ) else cls_token
lowercase_ : str = AddedToken(__SCREAMING_SNAKE_CASE, lstrip=__SCREAMING_SNAKE_CASE, rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE ) else unk_token
lowercase_ : Any = AddedToken(__SCREAMING_SNAKE_CASE, lstrip=__SCREAMING_SNAKE_CASE, rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowercase_ : List[str] = AddedToken(__SCREAMING_SNAKE_CASE, lstrip=__SCREAMING_SNAKE_CASE, rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE ) else mask_token
super().__init__(
errors=__SCREAMING_SNAKE_CASE, bos_token=__SCREAMING_SNAKE_CASE, eos_token=__SCREAMING_SNAKE_CASE, unk_token=__SCREAMING_SNAKE_CASE, sep_token=__SCREAMING_SNAKE_CASE, cls_token=__SCREAMING_SNAKE_CASE, pad_token=__SCREAMING_SNAKE_CASE, mask_token=__SCREAMING_SNAKE_CASE, add_prefix_space=__SCREAMING_SNAKE_CASE, **__SCREAMING_SNAKE_CASE, )
with open(__SCREAMING_SNAKE_CASE, encoding="""utf-8""" ) as vocab_handle:
lowercase_ : Optional[int] = json.load(__SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = {v: k for k, v in self.encoder.items()}
lowercase_ : Dict = errors # how to handle errors in decoding
lowercase_ : Tuple = bytes_to_unicode()
lowercase_ : int = {v: k for k, v in self.byte_encoder.items()}
with open(__SCREAMING_SNAKE_CASE, encoding="""utf-8""" ) as merges_handle:
lowercase_ : Dict = merges_handle.read().split("""\n""" )[1:-1]
lowercase_ : str = [tuple(merge.split() ) for merge in bpe_merges]
lowercase_ : Optional[Any] = dict(zip(__SCREAMING_SNAKE_CASE, range(len(__SCREAMING_SNAKE_CASE ) ) ) )
lowercase_ : Optional[Any] = {}
lowercase_ : Optional[int] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowercase_ : Dict = re.compile(r"""\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
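    # Worked example of the merge loop above (added note, hypothetical ranks):
    # starting from ("l", "o", "w", "e", "r"), if ("e", "r") has the lowest rank in
    # `bpe_ranks` it merges first, giving ("l", "o", "w", "er"); the loop repeats
    # until the lowest-ranked pair in `word` no longer appears in `bpe_ranks`.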
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
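    # Format produced above (added note): a single sequence is encoded as
    # `<s> A </s>` and a pair as `<s> A </s></s> B </s>`, the RoBERTa/BART
    # convention that LED inherits.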
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
    def _pad(
        self,
        encoded_inputs,
        max_length=None,
        padding_strategy=PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of=None,
        return_attention_mask=None,
    ):
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as the other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
| 458 |
'''simple docstring'''
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("norm1", "layernorm1")
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("norm2", "layernorm2")
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("fc1", "linear_in")
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("fc2", "linear_out")

            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name
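# Example of the effect (added note, hypothetical key): a checkpoint key such as
# "patch_embed.0.weight" is first rewritten to "patch_embed.convolution1.weight"
# and then prefixed, yielding "efficientformer.patch_embed.convolution1.weight".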
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    """Rename every key of the original checkpoint to its transformers equivalent."""
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val

    return checkpoint
def prepare_img():
    """Download the standard COCO test image used for the sanity checks."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(
    checkpoint_path: Path, efficientformer_config_file: Path, pytorch_dump_path: Path, push_to_hub: bool
):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)
    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")

        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''',
default=None,
type=str,
required=True,
help='''Path to EfficientFormer pytorch checkpoint.''',
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for EfficientFormer model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
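    # Note on the paired flags above (added illustration): both write to the same
    # dest, and `set_defaults(push_to_hub=True)` makes pushing opt-out. On a parser
    # containing only this pair of flags:
    #     p.parse_args([]).push_to_hub                      # True
    #     p.parse_args(["--no-push_to_hub"]).push_to_hub    # False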
| 24 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''],
'''tokenization_mvp''': ['''MvpTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mvp"] = [
'''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MvpForCausalLM''',
'''MvpForConditionalGeneration''',
'''MvpForQuestionAnswering''',
'''MvpForSequenceClassification''',
'''MvpModel''',
'''MvpPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
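# Note (added, simplified): `_LazyModule` replaces this module in `sys.modules` and
# only imports a submodule the first time one of its attributes is accessed, so the
# top-level package import stays cheap; `_import_structure` above is the lookup
# table mapping submodule names to the public names they provide.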
| 7 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def prepare_image_processor_dict(self):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
__lowercase : Union[str, Any] = MobileViTImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
__snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__snake_case = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
__snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__snake_case = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
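# Hedged usage sketch of the processor exercised by the tests above
# (apple/mobilevit-small is a real checkpoint; the random image is illustrative):
#   import numpy as np
#   from PIL import Image
#   from transformers import MobileViTImageProcessor
#   processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")
#   image = Image.fromarray(np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8))
#   pixel_values = processor(image, return_tensors="pt").pixel_values  # (1, 3, crop_h, crop_w)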
| 24 | 0 |
'''simple docstring'''
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    '''simple docstring'''
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f'''Building PyTorch model from configuration: {config}''' )
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''' )
    torch.save(model.state_dict() , pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--albert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained ALBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
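    # Example invocation (the script name and all three paths are hypothetical):
    #   python convert_albert_original_tf_checkpoint_to_pytorch.py \
    #       --tf_checkpoint_path ./albert_base/model.ckpt-best \
    #       --albert_config_file ./albert_base/albert_config.json \
    #       --pytorch_dump_path ./albert_base/pytorch_model.bin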
| 120 |
'''simple docstring'''
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader( AbstractDatasetReader):
    def __init__( self , df , split = None , features = None , streaming = True , cache_dir = None , keep_in_memory = False , working_dir = None , load_from_cache_file = True , file_format = "arrow" , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(
            split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , **kwargs , )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df , features=features , cache_dir=cache_dir , working_dir=working_dir , **kwargs , )

    def read( self ):
        '''simple docstring'''
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split )
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode , file_format=self._file_format , )
        return self.builder.as_dataset(split=self.split )
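# Hedged usage sketch (assumes an active SparkSession named `spark`; the toy
# DataFrame and cache directory are illustrative):
#   df = spark.createDataFrame([("hello",), ("world",)], ["text"])
#   ds = SparkDatasetReader(df, cache_dir="/tmp/hf_cache").read()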
| 24 | 0 |
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
FILES_TO_FIND = [
'''kernels/rwkv/wkv_cuda.cu''',
'''kernels/rwkv/wkv_op.cpp''',
'''kernels/deformable_detr/ms_deform_attn.h''',
'''kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh''',
'''models/graphormer/algos_graphormer.pyx''',
]
def test_custom_files_are_present(transformers_path) -> bool:
    '''simple docstring'''
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module("transformers")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / "build/lib/transformers"
if not test_custom_files_are_present(transformers_path):
raise ValueError("The built release does not contain the custom files. Fix this before going further!")
| 393 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : str = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCAmelCase_ : str = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
UpperCAmelCase_ : Union[str, Any] = {
'''allenai/led-base-16384''': 1_6_3_8_4,
}
class LEDTokenizerFast( PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ['''input_ids''', '''attention_mask''']
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('''type''' ) )
            pre_tok_state['''add_prefix_space'''] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = '''post_processor'''
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state['''sep'''] = tuple(state['''sep'''] )
            if "cls" in state:
                state['''cls'''] = tuple(state['''cls'''] )

            changes_to_apply = False
            if state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
                state['''add_prefix_space'''] = add_prefix_space
                changes_to_apply = True

            if state.get('''trim_offsets''' , trim_offsets ) != trim_offsets:
                state['''trim_offsets'''] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors , state.pop('''type''' ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token( self ) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
    def mask_token( self , value ) -> None:
        '''simple docstring'''
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
    def _batch_encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                '''to use it with pretokenized inputs.''' )

        return super()._batch_encode_plus(*args , **kwargs )

    def _encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                '''to use it with pretokenized inputs.''' )

        return super()._encode_plus(*args , **kwargs )

    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) -> List[int]:
        '''simple docstring'''
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]

    def _pad( self , encoded_inputs , max_length = None , padding_strategy = PaddingStrategy.DO_NOT_PAD , pad_to_multiple_of = None , return_attention_mask = None , ) -> dict:
        '''simple docstring'''
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = '''attention_mask''' in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs['''global_attention_mask'''] ) != len(required_input )

            if needs_to_be_padded:
                difference = len(required_input ) - len(encoded_inputs['''global_attention_mask'''] )

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs['''global_attention_mask'''] = (
                        encoded_inputs['''global_attention_mask'''] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs['''global_attention_mask'''] = [-1] * difference + encoded_inputs[
                        '''global_attention_mask'''
                    ]
                else:
                    raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
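# Hedged usage sketch of the `global_attention_mask` padding above
# (allenai/led-base-16384 is the real checkpoint; the inputs are made up):
#   tok = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
#   enc = tok(["short text", "a noticeably longer input text"])
#   enc["global_attention_mask"] = [[1] + [0] * (len(ids) - 1) for ids in enc["input_ids"]]
#   padded = tok.pad(enc, padding="longest")  # shorter rows get -1 appended on the right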
| 24 | 0 |
"""simple docstring"""
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class __a ( TestCase ):
def lowerCamelCase_ ( self ):
'''simple docstring'''
lowerCAmelCase_ = SMALL_MODEL_IDENTIFIER
lowerCAmelCase_ = '''pt'''
lowerCAmelCase_ = '''tf'''
def lowerCamelCase_ ( self , UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase_ = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(__SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( self , UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase_ = TFAutoModel.from_pretrained(self.test_model , from_pt=__SCREAMING_SNAKE_CASE )
model_tf.save_pretrained(__SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( self ):
'''simple docstring'''
lowerCAmelCase_ = '''mock_framework'''
# Framework provided - return whatever the user provides
lowerCAmelCase_ = FeaturesManager.determine_framework(self.test_model , __SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(__SCREAMING_SNAKE_CASE )
lowerCAmelCase_ = FeaturesManager.determine_framework(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(__SCREAMING_SNAKE_CASE )
lowerCAmelCase_ = FeaturesManager.determine_framework(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( self ):
'''simple docstring'''
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(__SCREAMING_SNAKE_CASE )
lowerCAmelCase_ = FeaturesManager.determine_framework(__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(__SCREAMING_SNAKE_CASE )
lowerCAmelCase_ = FeaturesManager.determine_framework(__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
lowerCAmelCase_ = FeaturesManager.determine_framework(__SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( self ):
'''simple docstring'''
lowerCAmelCase_ = MagicMock(return_value=__SCREAMING_SNAKE_CASE )
with patch('''transformers.onnx.features.is_tf_available''' , __SCREAMING_SNAKE_CASE ):
lowerCAmelCase_ = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__SCREAMING_SNAKE_CASE , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
lowerCAmelCase_ = MagicMock(return_value=__SCREAMING_SNAKE_CASE )
with patch('''transformers.onnx.features.is_torch_available''' , __SCREAMING_SNAKE_CASE ):
lowerCAmelCase_ = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__SCREAMING_SNAKE_CASE , self.framework_tf )
# Both in environment -> use PyTorch
lowerCAmelCase_ = MagicMock(return_value=__SCREAMING_SNAKE_CASE )
lowerCAmelCase_ = MagicMock(return_value=__SCREAMING_SNAKE_CASE )
with patch('''transformers.onnx.features.is_tf_available''' , __SCREAMING_SNAKE_CASE ), patch(
'''transformers.onnx.features.is_torch_available''' , __SCREAMING_SNAKE_CASE ):
lowerCAmelCase_ = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__SCREAMING_SNAKE_CASE , self.framework_pt )
# Both not in environment -> raise error
lowerCAmelCase_ = MagicMock(return_value=__SCREAMING_SNAKE_CASE )
lowerCAmelCase_ = MagicMock(return_value=__SCREAMING_SNAKE_CASE )
with patch('''transformers.onnx.features.is_tf_available''' , __SCREAMING_SNAKE_CASE ), patch(
'''transformers.onnx.features.is_torch_available''' , __SCREAMING_SNAKE_CASE ):
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
                lowerCAmelCase_ = FeaturesManager.determine_framework(self.test_model )
| 552 |
'''simple docstring'''
from collections import deque
def tarjan(g):
    '''simple docstring'''

    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)

    return components


def create_graph(n, edges):
    '''simple docstring'''
    g = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g


if __name__ == "__main__":
    # Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)

    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
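    # A second, illustrative sanity check: two 2-cycles plus an isolated vertex.
    # Component order depends on traversal, so the comparison sorts both levels.
    g2 = create_graph(5, [(0, 1), (1, 0), (2, 3), (3, 2)])
    assert sorted(sorted(c) for c in tarjan(g2)) == [[0, 1], [2, 3], [4]]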
| 24 | 0 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class __lowercase ( __lowerCAmelCase ):
"""simple docstring"""
@staticmethod
@abstractmethod
def __A ( A ) -> Dict:
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
raise NotImplementedError()
| 457 |
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest( TokenizerTesterMixin , unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
super().setUp()
__snake_case = BarthezTokenizerFast.from_pretrained('''moussaKam/mbarthez''' )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=__SCREAMING_SNAKE_CASE )
__snake_case = tokenizer
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
__snake_case = '''<pad>'''
__snake_case = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 10_1122 )
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_1122 )
@require_torch
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
__snake_case = [0, 57, 3018, 7_0307, 91, 2]
__snake_case = self.tokenizer(
__SCREAMING_SNAKE_CASE , max_length=len(__SCREAMING_SNAKE_CASE ) , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
__snake_case = batch.input_ids.tolist()[0]
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__snake_case = self.get_tokenizer()
__snake_case = self.get_rust_tokenizer()
__snake_case = '''I was born in 92000, and this is falsé.'''
__snake_case = tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
__snake_case = rust_tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
__snake_case = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = self.get_rust_tokenizer()
__snake_case = tokenizer.encode(__SCREAMING_SNAKE_CASE )
__snake_case = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@slow
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case = {'''input_ids''': [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
__snake_case = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
expected_encoding=__SCREAMING_SNAKE_CASE , model_name='''moussaKam/mbarthez''' , revision='''c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6''' , sequences=__SCREAMING_SNAKE_CASE , )
| 24 | 0 |
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    """simple docstring"""
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """simple docstring"""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f'seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'
            " function." )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir , filename=exp , monitor=f'val_{metric}' , mode="max" , save_top_k=1 , every_n_epochs=1 , )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    """simple docstring"""
    return EarlyStopping(
        monitor=f'val_{metric}' , mode="min" if "loss" in metric else "max" , patience=patience , verbose=True , )
class Seq2SeqLoggingCallback( pl.Callback ):
    """simple docstring"""

    def on_batch_end( self , trainer , pl_module ):
        lrs = {f'lr_group_{i}': param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
        pl_module.logger.log_metrics(lrs )

    @rank_zero_only
    def _write_logs( self , trainer , pl_module , type_path , save_generations=True ):
        logger.info(f'***** {type_path} results at step {trainer.global_step:05d} *****' )
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} )
        # Log results
        od = Path(pl_module.hparams.output_dir )
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f'{type_path}_results/{trainer.global_step:05d}.txt'
            generations_file = od / f'{type_path}_generations/{trainer.global_step:05d}.txt'
        results_file.parent.mkdir(exist_ok=True )
        generations_file.parent.mkdir(exist_ok=True )
        with open(results_file , "a+" ) as writer:
            for key in sorted(metrics ):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val , torch.Tensor ):
                    val = val.item()
                msg = f'{key}: {val:.6f}\n'
                writer.write(msg )

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"] )
            generations_file.open("w+" ).write(content )

    @rank_zero_only
    def on_train_start( self , trainer , pl_module ):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module )
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6} )

    @rank_zero_only
    def on_test_end( self , trainer , pl_module ):
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        return self._write_logs(trainer , pl_module , "test" )

    @rank_zero_only
    def on_validation_end( self , trainer , pl_module ):
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 331 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester( unittest.TestCase):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=99 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=4 , ) -> Any:
'''simple docstring'''
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_attention_mask
__snake_case = use_token_type_ids
__snake_case = use_labels
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = num_choices
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case = None
if self.use_attention_mask:
__snake_case = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case = None
if self.use_token_type_ids:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__snake_case = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case , __snake_case = config_and_inputs
__snake_case = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest( FlaxModelTesterMixin , unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case = FlaxRoFormerModelTester(self )
@slow
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
__snake_case = model_class_name.from_pretrained('''junnyu/roformer_chinese_small''' , from_pt=__SCREAMING_SNAKE_CASE )
__snake_case = model(np.ones((1, 1) ) )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
@require_flax
class lowerCAmelCase ( unittest.TestCase):
@slow
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = FlaxRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
__snake_case = jnp.array([[0, 1, 2, 3, 4, 5]] )
__snake_case = model(__SCREAMING_SNAKE_CASE )[0]
__snake_case = 5_0000
__snake_case = (1, 6, vocab_size)
self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE )
__snake_case = jnp.array(
[[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
| 24 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False
def lowercase_ ( self : Dict ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase__ : int = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
UpperCAmelCase__ : List[str] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=__SCREAMING_SNAKE_CASE , set_alpha_to_one=__SCREAMING_SNAKE_CASE , )
torch.manual_seed(0 )
UpperCAmelCase__ : List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCAmelCase__ : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
UpperCAmelCase__ : List[Any] = CLIPTextModel(__SCREAMING_SNAKE_CASE )
UpperCAmelCase__ : int = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
UpperCAmelCase__ : Any = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def lowercase_ ( self : Tuple , _A : List[Any] , _A : List[Any]=0 ):
'''simple docstring'''
if str(__SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
UpperCAmelCase__ : str = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase__ : Tuple = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
UpperCAmelCase__ : Dict = {
'''prompt''': '''.''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 1.0,
'''sag_scale''': 1.0,
'''output_type''': '''numpy''',
}
return inputs
def lowercase_ ( self : str ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = StableDiffusionSAGPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
UpperCAmelCase__ : List[Any] = sag_pipe.to(__SCREAMING_SNAKE_CASE )
sag_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
UpperCAmelCase__ : List[str] = '''.'''
UpperCAmelCase__ : List[str] = torch.manual_seed(0 )
UpperCAmelCase__ : Union[str, Any] = sag_pipe(
[prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' )
UpperCAmelCase__ : List[Any] = output.images
UpperCAmelCase__ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase__ : List[str] = np.array([0.1_5_6_8, 0.1_7_3_8, 0.1_6_9_5, 0.1_6_9_3, 0.1_5_0_7, 0.1_7_0_5, 0.1_5_4_7, 0.1_7_5_1, 0.1_9_4_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
UpperCAmelCase__ : Optional[int] = sag_pipe.to(__SCREAMING_SNAKE_CASE )
sag_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
UpperCAmelCase__ : Union[str, Any] = '''.'''
UpperCAmelCase__ : Tuple = torch.manual_seed(0 )
UpperCAmelCase__ : Dict = sag_pipe(
[prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' )
UpperCAmelCase__ : Any = output.images
UpperCAmelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase__ : int = np.array([0.3_4_5_9, 0.2_8_7_6, 0.2_5_3_7, 0.3_0_0_2, 0.2_6_7_1, 0.2_1_6_0, 0.3_0_2_6, 0.2_2_6_2, 0.2_3_7_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
UpperCAmelCase__ : Union[str, Any] = sag_pipe.to(__SCREAMING_SNAKE_CASE )
sag_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
UpperCAmelCase__ : Dict = '''.'''
UpperCAmelCase__ : List[Any] = torch.manual_seed(0 )
UpperCAmelCase__ : List[str] = sag_pipe(
[prompt] , width=768 , height=512 , generator=__SCREAMING_SNAKE_CASE , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' , )
UpperCAmelCase__ : str = output.images
assert image.shape == (1, 512, 768, 3)
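# Informal note on the two guidance knobs exercised above: `guidance_scale` is
# ordinary classifier-free guidance, while `sag_scale` weights the additional
# self-attention-guidance correction; `sag_scale=0.0` should fall back to the
# plain classifier-free-guided pipeline.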
| 75 |
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    '''simple docstring'''
    url = '''https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'''
    image = Image.open(requests.get(url , stream=True ).raw ).convert('''RGB''' )

    return image
def create_rename_keys(config):
    '''simple docstring'''
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    '''simple docstring'''
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    '''simple docstring'''
    for i in range(config.vision_config.num_hidden_layers ):
        # read in original q and v biases
        q_bias = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' )
        v_bias = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' )

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias) )
        state_dict[f'''visual_encoder.blocks.{i}.attn.qkv.bias'''] = qkv_bias
def get_blipa_config(model_name, eos_token_id):
    '''simple docstring'''
    image_size = 364 if '''coco''' in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size ).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained('''facebook/opt-2.7b''' , eos_token_id=eos_token_id ).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained('''facebook/opt-6.7b''' , eos_token_id=eos_token_id ).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()

    config = BlipaConfig(vision_config=vision_config , text_config=text_config )

    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    '''simple docstring'''
    tokenizer = (
        AutoTokenizer.from_pretrained('''facebook/opt-2.7b''' )
        if '''opt''' in model_name
        else AutoTokenizer.from_pretrained('''google/flan-t5-xl''' )
    )
    eos_token_id = tokenizer('''\n''' , add_special_tokens=False ).input_ids[0]
    config, image_size = get_blipa_config(model_name , eos_token_id=eos_token_id )

    hf_model = BlipaForConditionalGeneration(config ).eval()

    model_name_to_original = {
        '''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''),
        '''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''),
        '''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''),
        '''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''),
        '''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''),
        '''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''),
        '''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''),
    }

    name, type = model_name_to_original[model_name]

    # load original model
    print('''Loading original model...''' )
    device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name , model_type=type , is_eval=True , device=device )
    original_model.eval()
    print('''Done!''' )

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key )
        if key.startswith('''Qformer.bert''' ):
            key = key.replace('''Qformer.bert''' , '''qformer''' )
        if "attention.self" in key:
            key = key.replace('''self''' , '''attention''' )
        if "opt_proj" in key:
            key = key.replace('''opt_proj''' , '''language_projection''' )
        if "t5_proj" in key:
            key = key.replace('''t5_proj''' , '''language_projection''' )
        if key.startswith('''opt''' ):
            key = key.replace('''opt''' , '''language''' )
        if key.startswith('''t5''' ):
            key = key.replace('''t5''' , '''language''' )
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict , config )

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict , strict=False )
    assert len(missing_keys ) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors['''eval'''](image ).unsqueeze(0 ).to(device )
    input_ids = tokenizer(['''\n'''] , return_tensors='''pt''' ).input_ids.to(device )

    # create processor
    image_processor = BlipImageProcessor(
        size={'''height''': image_size, '''width''': image_size} , image_mean=OPENAI_CLIP_MEAN , image_std=OPENAI_CLIP_STD )
    processor = BlipaProcessor(image_processor=image_processor , tokenizer=tokenizer )
    pixel_values = processor(images=image , return_tensors='''pt''' ).pixel_values.to(device )

    # make sure processor creates exact same pixel values
    assert torch.allclose(original_pixel_values , pixel_values )

    original_model.to(device )
    hf_model.to(device )
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({'''image''': original_pixel_values, '''text_input''': ['''''']} ).logits
            logits = hf_model(pixel_values , input_ids ).logits
        else:
            original_logits = original_model(
                {'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']} ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
            logits = hf_model(pixel_values , input_ids , labels=labels ).logits

    assert original_logits.shape == logits.shape
    print('''First values of original logits:''' , original_logits[0, :3, :3] )
    print('''First values of HF logits:''' , logits[0, :3, :3] )

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=device )
        assert torch.allclose(logits[0, :3, :3] , expected_slice_logits , atol=1E-4 )
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=device )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype ) , logits , atol=1E-2 )
    print('''Looks ok!''' )

    print('''Generating a caption...''' )
    prompt = ''''''
    input_ids = tokenizer(prompt , return_tensors='''pt''' ).input_ids.to(device )

    original_outputs = original_model.generate({'''image''': original_pixel_values} )
    outputs = hf_model.generate(
        pixel_values , input_ids , do_sample=False , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
    print('''Original generation:''' , original_outputs )
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=True )
    output_text = [text.strip() for text in output_text]
    print('''HF generation:''' , output_text )

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path )
        hf_model.save_pretrained(pytorch_dump_folder_path )

    if push_to_hub:
        processor.push_to_hub(f'''nielsr/{model_name}''' )
        hf_model.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
    args = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
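    # Example invocation (the script name and dump folder are hypothetical):
    #   python convert_blip_2_original_to_pytorch.py \
    #       --model_name blip2-opt-2.7b \
    #       --pytorch_dump_folder_path ./blip2-opt-2.7b \
    #       --push_to_hub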
| 24 | 0 |
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] = 16 , UpperCAmelCase_ : List[Any] = 88 , UpperCAmelCase_ : Tuple = None , UpperCAmelCase_ : str = 1 , UpperCAmelCase_ : Any = 0.0 , UpperCAmelCase_ : Tuple = 32 , UpperCAmelCase_ : Union[str, Any] = None , UpperCAmelCase_ : Optional[int] = False , UpperCAmelCase_ : Dict = None , UpperCAmelCase_ : int = None , UpperCAmelCase_ : Optional[int] = "geglu" , UpperCAmelCase_ : Tuple = None , ):
super().__init__()
SCREAMING_SNAKE_CASE : List[Any] = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=__SCREAMING_SNAKE_CASE , attention_head_dim=__SCREAMING_SNAKE_CASE , in_channels=__SCREAMING_SNAKE_CASE , num_layers=__SCREAMING_SNAKE_CASE , dropout=__SCREAMING_SNAKE_CASE , norm_num_groups=__SCREAMING_SNAKE_CASE , cross_attention_dim=__SCREAMING_SNAKE_CASE , attention_bias=__SCREAMING_SNAKE_CASE , sample_size=__SCREAMING_SNAKE_CASE , num_vector_embeds=__SCREAMING_SNAKE_CASE , activation_fn=__SCREAMING_SNAKE_CASE , num_embeds_ada_norm=__SCREAMING_SNAKE_CASE , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
SCREAMING_SNAKE_CASE : List[Any] = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
SCREAMING_SNAKE_CASE : Dict = [77, 257]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
SCREAMING_SNAKE_CASE : Dict = [1, 0]
def _A ( self : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : Optional[Any] = True , ):
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_states
SCREAMING_SNAKE_CASE : str = []
SCREAMING_SNAKE_CASE : Optional[int] = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
SCREAMING_SNAKE_CASE : List[Any] = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
SCREAMING_SNAKE_CASE : Tuple = self.transformer_index_for_condition[i]
SCREAMING_SNAKE_CASE : int = self.transformers[transformer_index](
__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , timestep=__SCREAMING_SNAKE_CASE , cross_attention_kwargs=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
SCREAMING_SNAKE_CASE : str = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
SCREAMING_SNAKE_CASE : int = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=__SCREAMING_SNAKE_CASE )
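# Worked mixing example (informal): with mix_ratio = 0.5 and residual outputs
# e0, e1 from the two transformers, the forward pass above returns
#   0.5 * e0 + 0.5 * e1 + input_states,
# i.e. the two conditioned residuals are averaged before the skip connection
# back to the input states.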
| 62 |
'''simple docstring'''
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
UpperCAmelCase_ : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
def _UpperCamelCase (_lowerCamelCase : Union[List, PIL.Image.Image, torch.Tensor] )-> Optional[Any]:
'''simple docstring'''
warnings.warn(
'''The preprocess method is deprecated and will be removed in a future version. Please'''
''' use VaeImageProcessor.preprocess instead''' , _lowerCamelCase , )
if isinstance(_lowerCamelCase , torch.Tensor ):
return image
elif isinstance(_lowerCamelCase , PIL.Image.Image ):
__snake_case = [image]
if isinstance(image[0] , PIL.Image.Image ):
__snake_case , __snake_case = image[0].size
__snake_case , __snake_case = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
__snake_case = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
__snake_case = np.concatenate(_lowerCamelCase , axis=0 )
__snake_case = np.array(_lowerCamelCase ).astype(np.floataa ) / 255.0
__snake_case = image.transpose(0 , 3 , 1 , 2 )
__snake_case = 2.0 * image - 1.0
__snake_case = torch.from_numpy(_lowerCamelCase )
elif isinstance(image[0] , torch.Tensor ):
__snake_case = torch.cat(_lowerCamelCase , dim=0 )
return image
def _UpperCamelCase (_lowerCamelCase : Union[List, PIL.Image.Image, torch.Tensor] )-> Optional[Any]:
'''simple docstring'''
if isinstance(_lowerCamelCase , torch.Tensor ):
return mask
elif isinstance(_lowerCamelCase , PIL.Image.Image ):
__snake_case = [mask]
if isinstance(mask[0] , PIL.Image.Image ):
__snake_case , __snake_case = mask[0].size
__snake_case , __snake_case = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
__snake_case = [np.array(m.convert('''L''' ).resize((w, h) , resample=PIL_INTERPOLATION['''nearest'''] ) )[None, :] for m in mask]
__snake_case = np.concatenate(_lowerCamelCase , axis=0 )
__snake_case = mask.astype(np.floataa ) / 255.0
__snake_case = 0
__snake_case = 1
__snake_case = torch.from_numpy(_lowerCamelCase )
elif isinstance(mask[0] , torch.Tensor ):
__snake_case = torch.cat(_lowerCamelCase , dim=0 )
return mask
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : UNetaDModel
__lowercase : RePaintScheduler
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
super().__init__()
self.register_modules(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
@torch.no_grad()
def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 250 , __SCREAMING_SNAKE_CASE = 0.0 , __SCREAMING_SNAKE_CASE = 10 , __SCREAMING_SNAKE_CASE = 10 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "pil" , __SCREAMING_SNAKE_CASE = True , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
__snake_case = image
__snake_case = _preprocess_image(__SCREAMING_SNAKE_CASE )
__snake_case = original_image.to(device=self.device , dtype=self.unet.dtype )
__snake_case = _preprocess_mask(__SCREAMING_SNAKE_CASE )
__snake_case = mask_image.to(device=self.device , dtype=self.unet.dtype )
__snake_case = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and len(__SCREAMING_SNAKE_CASE ) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(__SCREAMING_SNAKE_CASE )}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
__snake_case = original_image.shape
__snake_case = randn_tensor(__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.device )
__snake_case = eta
__snake_case = self.scheduler.timesteps[0] + 1
__snake_case = generator[0] if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
__snake_case = self.unet(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).sample
# compute previous image: x_t -> x_t-1
__snake_case = self.scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
__snake_case = self.scheduler.undo_step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = t
__snake_case = (image / 2 + 0.5).clamp(0 , 1 )
__snake_case = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__snake_case = self.numpy_to_pil(__SCREAMING_SNAKE_CASE )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__SCREAMING_SNAKE_CASE )
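# --- Editor's sketch: how the RePaint pipeline above would typically be invoked.
# The checkpoint id and file names are hypothetical placeholders; this assumes the
# standard diffusers `from_pretrained` entry point and is a usage sketch only.
# from diffusers import RePaintPipeline
# from PIL import Image
#
# pipe = RePaintPipeline.from_pretrained("some/repaint-checkpoint")  # placeholder id
# original = Image.open("face.png")  # image to restore (hypothetical file)
# mask = Image.open("mask.png")      # white = keep, black = inpaint (see _preprocess_mask)
# result = pipe(image=original, mask_image=mask, num_inference_steps=250).images[0]
# result.save("inpainted.png")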
| 24 | 0 |
import math
import os
import sys
def lowercase ( __A : str ) -> str:
'''simple docstring'''
snake_case : Dict = """"""
try:
with open(_lowerCamelCase , """rb""" ) as binary_file:
snake_case : Tuple = binary_file.read()
for dat in data:
snake_case : Optional[int] = f"""{dat:08b}"""
result += curr_byte
return result
except OSError:
print("""File not accessible""" )
sys.exit()
def lowercase ( __A : dict[str, str] , __A : str , __A : int , __A : str ) -> None:
'''simple docstring'''
lexicon.pop(_lowerCamelCase )
snake_case : Union[str, Any] = last_match_id
if math.loga(_lowerCamelCase ).is_integer():
for curr_key in lexicon:
snake_case : str = """0""" + lexicon[curr_key]
snake_case : List[Any] = bin(_lowerCamelCase )[2:]
def lowercase ( __A : str ) -> str:
'''simple docstring'''
snake_case : Union[str, Any] = {"""0""": """0""", """1""": """1"""}
snake_case , snake_case : Optional[Any] = """""", """"""
snake_case : Optional[int] = len(_lowerCamelCase )
for i in range(len(_lowerCamelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
snake_case : Optional[Any] = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
index += 1
snake_case : Union[str, Any] = """"""
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
snake_case : str = lexicon[curr_string]
result += last_match_id
return result
def lowercase ( __A : str , __A : str ) -> str:
'''simple docstring'''
snake_case : Tuple = os.path.getsize(_lowerCamelCase )
snake_case : Optional[int] = bin(_lowerCamelCase )[2:]
snake_case : Optional[int] = len(_lowerCamelCase )
return "0" * (length_length - 1) + file_length_binary + compressed
def lowercase ( __A : str , __A : str ) -> None:
'''simple docstring'''
snake_case : List[str] = 8
try:
with open(_lowerCamelCase , """wb""" ) as opened_file:
snake_case : Dict = [
to_write[i : i + byte_length]
for i in range(0 , len(_lowerCamelCase ) , _lowerCamelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("""10000000""" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(_lowerCamelCase , 2 ).to_bytes(1 , byteorder="""big""" ) )
except OSError:
print("""File not accessible""" )
sys.exit()
def lowercase ( __A : str , __A : str ) -> None:
'''simple docstring'''
snake_case : Optional[Any] = read_file_binary(_lowerCamelCase )
snake_case : List[Any] = compress_data(_lowerCamelCase )
snake_case : Dict = add_file_length(_lowerCamelCase , _lowerCamelCase )
write_file_binary(_lowerCamelCase , _lowerCamelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
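# --- Editor's note: the call order implemented by compress() above, with
# hypothetical file names, is:
#   read_file_binary -> compress_data -> add_file_length -> write_file_binary
# e.g. `python lempel_ziv.py source.bin compressed.lz`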
| 36 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
UpperCAmelCase_ : str = logging.get_logger(__name__)
@add_end_docstrings(__lowerCAmelCase)
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
return super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , **__SCREAMING_SNAKE_CASE ) -> Tuple:
'''simple docstring'''
__snake_case = {}
if "candidate_labels" in kwargs:
__snake_case = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
__snake_case = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="This is a photo of {}." ) -> Optional[Any]:
'''simple docstring'''
__snake_case = load_image(__SCREAMING_SNAKE_CASE )
__snake_case = self.image_processor(images=[image] , return_tensors=self.framework )
__snake_case = candidate_labels
__snake_case = [hypothesis_template.format(__SCREAMING_SNAKE_CASE ) for x in candidate_labels]
__snake_case = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=self.framework , padding=__SCREAMING_SNAKE_CASE )
__snake_case = [text_inputs]
return inputs
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
__snake_case = model_inputs.pop('''candidate_labels''' )
__snake_case = model_inputs.pop('''text_inputs''' )
if isinstance(text_inputs[0] , __SCREAMING_SNAKE_CASE ):
__snake_case = text_inputs[0]
else:
# Batching case.
__snake_case = text_inputs[0][0]
__snake_case = self.model(**__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
__snake_case = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_image,
}
return model_outputs
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
__snake_case = model_outputs.pop('''candidate_labels''' )
__snake_case = model_outputs['''logits'''][0]
if self.framework == "pt":
__snake_case = logits.softmax(dim=-1 ).squeeze(-1 )
__snake_case = probs.tolist()
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__snake_case = [scores]
elif self.framework == "tf":
__snake_case = stable_softmax(__SCREAMING_SNAKE_CASE , axis=-1 )
__snake_case = probs.numpy().tolist()
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
__snake_case = [
{'''score''': score, '''label''': candidate_label}
            for score, candidate_label in sorted(zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , key=lambda x : -x[0] )
]
return result
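# --- Editor's sketch: typical use of the zero-shot image classification pipeline
# above via the high-level `pipeline` factory. The image path is hypothetical and
# the checkpoint is one plausible CLIP model; treat this as a usage sketch only.
# from transformers import pipeline
#
# classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
# preds = classifier("cat.png", candidate_labels=["cat", "dog", "car"])
# # -> [{"score": ..., "label": "cat"}, ...] sorted by descending score, as in postprocess()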
| 24 | 0 |
from random import randint, random
def a ( snake_case__: int , snake_case__: int , snake_case__: int , snake_case__: bool = False , snake_case__: bool = False , snake_case__: int = 5 , ):
'''simple docstring'''
lowercase_ = [[-1] * number_of_cells] # Create a highway without any car
lowercase_ = 0
lowercase_ = max(_lowerCamelCase , 0 )
while i < number_of_cells:
lowercase_ = (
randint(0 , _lowerCamelCase ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 , max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
def a ( snake_case__: list , snake_case__: int ):
'''simple docstring'''
lowercase_ = 0
lowercase_ = highway_now[car_index + 1 :]
for cell in range(len(_lowerCamelCase ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
# Here if the car is near the end of the highway
return distance + get_distance(_lowerCamelCase , -1 )
def a ( snake_case__: list , snake_case__: float , snake_case__: int ):
'''simple docstring'''
lowercase_ = len(_lowerCamelCase )
    # Before calculations, the highway is empty
lowercase_ = [-1] * number_of_cells
for car_index in range(_lowerCamelCase ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
lowercase_ = min(highway_now[car_index] + 1 , _lowerCamelCase )
            # Number of empty cells before the next car
lowercase_ = get_distance(_lowerCamelCase , _lowerCamelCase ) - 1
# We can't have the car causing an accident
lowercase_ = min(next_highway[car_index] , _lowerCamelCase )
if random() < probability:
# Randomly, a driver will slow down
lowercase_ = max(next_highway[car_index] - 1 , 0 )
return next_highway
def a ( snake_case__: list , snake_case__: int , snake_case__: float , snake_case__: int ):
'''simple docstring'''
lowercase_ = len(highway[0] )
for i in range(_lowerCamelCase ):
lowercase_ = update(highway[i] , _lowerCamelCase , _lowerCamelCase )
lowercase_ = [-1] * number_of_cells
for car_index in range(_lowerCamelCase ):
lowercase_ = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
lowercase_ = (car_index + speed) % number_of_cells
# Commit the change of position
lowercase_ = speed
highway.append(_lowerCamelCase )
return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
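# --- Editor's sketch: driving the Nagel-Schreckenberg model above. The argument
# values are arbitrary demo choices (positional, matching the signatures above).
# highway = construct_highway(20, 4, 1)    # 20 cells, a car every ~4 cells, speed 1
# history = simulate(highway, 10, 0.1, 5)  # 10 updates, 10% slowdown, max speed 5
# for state in history:
#     print("".join("." if cell == -1 else str(cell) for cell in state))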
| 97 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCAmelCase_ : List[str] = {
'''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : int = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 24 | 0 |
import pprint
import requests
UpperCAmelCase_ = '''https://zenquotes.io/api'''
def __magic_name__ ( ) -> list:
"""simple docstring"""
return requests.get(API_ENDPOINT_URL + """/today""" ).json()
def __magic_name__ ( ) -> list:
"""simple docstring"""
return requests.get(API_ENDPOINT_URL + """/random""" ).json()
if __name__ == "__main__":
UpperCAmelCase_ = random_quotes()
    pprint.pprint(response)
| 458 |
'''simple docstring'''
def _UpperCamelCase (_lowerCamelCase : int )-> int:
'''simple docstring'''
__snake_case = abs(_lowerCamelCase )
__snake_case = 0
while n > 0:
res += n % 10
n //= 10
return res
def _UpperCamelCase (_lowerCamelCase : int )-> int:
'''simple docstring'''
__snake_case = abs(_lowerCamelCase )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def _UpperCamelCase (_lowerCamelCase : int )-> int:
'''simple docstring'''
return sum(int(_lowerCamelCase ) for c in str(abs(_lowerCamelCase ) ) )
def _UpperCamelCase ()-> None:
'''simple docstring'''
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(_lowerCamelCase : Callable , _lowerCamelCase : int ) -> None:
__snake_case = f'''{func.__name__}({value})'''
__snake_case = timeit(f'''__main__.{call}''' , setup='''import __main__''' )
print(f'''{call:56} = {func(_lowerCamelCase )} -- {timing:.4f} seconds''' )
for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(_lowerCamelCase , _lowerCamelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
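# --- Editor's worked example: all three digit-sum variants above agree. For
# n = 262144, 2 + 6 + 2 + 1 + 4 + 4 = 19. A minimal self-contained check
# (the _demo name is hypothetical; it mirrors the iterative form):
def _digit_sum_demo(n: int) -> int:
    n, total = abs(n), 0
    while n > 0:
        total += n % 10
        n //= 10
    return total

assert _digit_sum_demo(262144) == 19
assert _digit_sum_demo(-262144) == 19  # the sign is discarded via abs()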
| 24 | 0 |
"""simple docstring"""
import math
def _snake_case ( _snake_case : list , _snake_case : int ) -> int:
'''simple docstring'''
_A = len(_lowerCamelCase )
_A = int(math.floor(math.sqrt(_lowerCamelCase ) ) )
_A = 0
while arr[min(_lowerCamelCase , _lowerCamelCase ) - 1] < x:
_A = step
step += int(math.floor(math.sqrt(_lowerCamelCase ) ) )
if prev >= n:
return -1
while arr[prev] < x:
_A = prev + 1
if prev == min(_lowerCamelCase , _lowerCamelCase ):
return -1
if arr[prev] == x:
return prev
return -1
if __name__ == "__main__":
a = input('''Enter numbers separated by a comma:\n''').strip()
a = [int(item) for item in user_input.split(''',''')]
a = int(input('''Enter the number to be searched:\n'''))
a = jump_search(arr, x)
if res == -1:
print('''Number not found!''')
else:
print(F'''Number {x} is at index {res}''')
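# --- Editor's worked example: jump search probes the array every floor(sqrt(n))
# elements, then scans linearly inside the block that could contain x. A minimal
# self-contained sketch (the _demo name is hypothetical):
import math as _math

def _jump_search_demo(arr: list, x: int) -> int:
    n = len(arr)
    if n == 0:
        return -1
    step = int(_math.sqrt(n))
    prev = 0
    # jump block by block while the end of the current block is still < x
    while arr[min(prev + step, n) - 1] < x:
        prev += step
        if prev >= n:
            return -1
    # linear scan inside the identified block
    for i in range(prev, min(prev + step, n)):
        if arr[i] == x:
            return i
    return -1

assert _jump_search_demo([0, 1, 4, 9, 16, 25, 36, 49], 16) == 4
assert _jump_search_demo([0, 1, 4, 9], 5) == -1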
| 7 |
'''simple docstring'''
def _UpperCamelCase (_lowerCamelCase : Optional[Any] )-> Dict:
'''simple docstring'''
__snake_case = []
__snake_case = []
__snake_case = {
'''^''': 3,
'''*''': 2,
'''/''': 2,
'''%''': 2,
'''+''': 1,
'''-''': 1,
} # Priority of each operator
__snake_case = len(_lowerCamelCase ) if (len(_lowerCamelCase ) > 7) else 7
# Print table header for output
print(
'''Symbol'''.center(8 ) , '''Stack'''.center(_lowerCamelCase ) , '''Postfix'''.center(_lowerCamelCase ) , sep=''' | ''' , )
print('''-''' * (print_width * 3 + 7) )
for x in infix:
if x.isalpha() or x.isdigit():
post_fix.append(_lowerCamelCase ) # if x is Alphabet / Digit, add it to Postfix
elif x == "(":
stack.append(_lowerCamelCase ) # if x is "(" push to Stack
elif x == ")": # if x is ")" pop stack until "(" is encountered
while stack[-1] != "(":
post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
stack.pop()
else:
if len(_lowerCamelCase ) == 0:
stack.append(_lowerCamelCase ) # If stack is empty, push x to stack
else: # while priority of x is not > priority of element in the stack
while len(_lowerCamelCase ) > 0 and priority[x] <= priority[stack[-1]]:
post_fix.append(stack.pop() ) # pop stack & add to Postfix
stack.append(_lowerCamelCase ) # push x to stack
print(
x.center(8 ) , (''''''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , (''''''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , sep=''' | ''' , ) # Output in tabular format
while len(_lowerCamelCase ) > 0: # while stack is not empty
post_fix.append(stack.pop() ) # pop stack & add to Postfix
print(
''' '''.center(8 ) , (''''''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , (''''''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , sep=''' | ''' , ) # Output in tabular format
return "".join(_lowerCamelCase ) # return Postfix as str
def _UpperCamelCase (_lowerCamelCase : Optional[Any] )-> str:
'''simple docstring'''
__snake_case = list(infix[::-1] ) # reverse the infix equation
for i in range(len(_lowerCamelCase ) ):
if infix[i] == "(":
__snake_case = ''')''' # change "(" to ")"
elif infix[i] == ")":
__snake_case = '''(''' # change ")" to "("
return (infix_2_postfix(''''''.join(_lowerCamelCase ) ))[
::-1
] # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
UpperCAmelCase_ : Dict = input('''\nEnter an Infix Equation = ''') # Input an Infix equation
UpperCAmelCase_ : Optional[Any] = ''''''.join(Infix.split()) # Remove spaces from the input
print('''\n\t''', Infix, '''(Infix) -> ''', infix_2_prefix(Infix), '''(Prefix)''')
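# --- Editor's worked example of the reverse trick above: for the infix input
# "(a+b)*c", reversing and swapping parentheses gives "c*(b+a)"; infix_2_postfix
# turns that into "cba+*"; reversing once more yields the prefix form "*+abc".
# (For reference, the plain postfix of "(a+b)*c" is "ab+c*".)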
| 24 | 0 |
'''simple docstring'''
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def UpperCAmelCase_ ( A ):
'''simple docstring'''
_a : Dict = tmp_path / 'file.csv'
_a : Union[str, Any] = textwrap.dedent(
'\\n header1,header2\n 1,2\n 10,20\n ' )
with open(_lowerCamelCase , 'w' ) as f:
f.write(_lowerCamelCase )
return str(_lowerCamelCase )
@pytest.fixture
def UpperCAmelCase_ ( A ):
'''simple docstring'''
_a : Optional[int] = tmp_path / 'malformed_file.csv'
_a : List[str] = textwrap.dedent(
'\\n header1,header2\n 1,2\n 10,20,\n ' )
with open(_lowerCamelCase , 'w' ) as f:
f.write(_lowerCamelCase )
return str(_lowerCamelCase )
@pytest.fixture
def UpperCAmelCase_ ( A , A ):
'''simple docstring'''
_a : Any = tmp_path / 'csv_with_image.csv'
_a : List[Any] = textwrap.dedent(
f'''\
image
{image_file}
''' )
with open(_lowerCamelCase , 'w' ) as f:
f.write(_lowerCamelCase )
return str(_lowerCamelCase )
@pytest.fixture
def UpperCAmelCase_ ( A ):
'''simple docstring'''
_a : Dict = tmp_path / 'csv_with_label.csv'
_a : Optional[int] = textwrap.dedent(
'\\n label\n good\n bad\n good\n ' )
with open(_lowerCamelCase , 'w' ) as f:
f.write(_lowerCamelCase )
return str(_lowerCamelCase )
@pytest.fixture
def UpperCAmelCase_ ( A ):
'''simple docstring'''
_a : Optional[int] = tmp_path / 'csv_with_int_list.csv'
_a : Tuple = textwrap.dedent(
'\\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n ' )
with open(_lowerCamelCase , 'w' ) as f:
f.write(_lowerCamelCase )
return str(_lowerCamelCase )
def UpperCAmelCase_ ( A , A , A ):
'''simple docstring'''
_a : Dict = Csv()
_a : Any = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(_lowerCamelCase , match='Error tokenizing data' ):
for _ in generator:
pass
assert any(
record.levelname == 'ERROR'
and 'Failed to read file' in record.message
and os.path.basename(_lowerCamelCase ) in record.message
for record in caplog.records )
@require_pil
def UpperCAmelCase_ ( A ):
'''simple docstring'''
with open(_lowerCamelCase , encoding='utf-8' ) as f:
_a : Any = f.read().splitlines()[1]
_a : Union[str, Any] = Csv(encoding='utf-8' , features=Features({'image': Image()} ) )
_a : List[str] = csv._generate_tables([[csv_file_with_image]] )
_a : int = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('image' ).type == Image()()
_a : List[str] = pa_table.to_pydict()['image']
assert generated_content == [{"path": image_file, "bytes": None}]
def UpperCAmelCase_ ( A ):
'''simple docstring'''
with open(_lowerCamelCase , encoding='utf-8' ) as f:
_a : List[Any] = f.read().splitlines()[1:]
_a : Dict = Csv(encoding='utf-8' , features=Features({'label': ClassLabel(names=['good', 'bad'] )} ) )
_a : Optional[Any] = csv._generate_tables([[csv_file_with_label]] )
_a : Optional[Any] = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('label' ).type == ClassLabel(names=['good', 'bad'] )()
_a : Tuple = pa_table.to_pydict()['label']
assert generated_content == [ClassLabel(names=['good', 'bad'] ).straint(_lowerCamelCase ) for label in labels]
def UpperCAmelCase_ ( A ):
'''simple docstring'''
_a : Dict = Csv(encoding='utf-8' , sep=',' , converters={'int_list': lambda A : [int(_lowerCamelCase ) for i in x.split()]} )
_a : Dict = csv._generate_tables([[csv_file_with_int_list]] )
_a : Optional[Any] = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field('int_list' ).type )
_a : List[str] = pa_table.to_pydict()['int_list']
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 120 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase_ : int = logging.get_logger(__name__)
UpperCAmelCase_ : Dict = {
'''microsoft/swin-tiny-patch4-window7-224''': (
'''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase):
__lowercase : List[Any] = '''swin'''
__lowercase : str = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , __SCREAMING_SNAKE_CASE=224 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=96 , __SCREAMING_SNAKE_CASE=[2, 2, 6, 2] , __SCREAMING_SNAKE_CASE=[3, 6, 12, 24] , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=4.0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1E-5 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ) -> int:
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE )
__snake_case = image_size
__snake_case = patch_size
__snake_case = num_channels
__snake_case = embed_dim
__snake_case = depths
__snake_case = len(__SCREAMING_SNAKE_CASE )
__snake_case = num_heads
__snake_case = window_size
__snake_case = mlp_ratio
__snake_case = qkv_bias
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = drop_path_rate
__snake_case = hidden_act
__snake_case = use_absolute_embeddings
__snake_case = layer_norm_eps
__snake_case = initializer_range
__snake_case = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__snake_case = int(embed_dim * 2 ** (len(__SCREAMING_SNAKE_CASE ) - 1) )
__snake_case = ['''stem'''] + [F'''stage{idx}''' for idx in range(1 , len(__SCREAMING_SNAKE_CASE ) + 1 )]
__snake_case , __snake_case = get_aligned_output_features_output_indices(
out_features=__SCREAMING_SNAKE_CASE , out_indices=__SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : Optional[int] = version.parse('''1.11''')
@property
def lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowerCAmelCase ( self ) -> float:
'''simple docstring'''
return 1E-4
| 24 | 0 |
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
A_ = '''\
'''
A_ = '''
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
'''
A_ = '''
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to \'cuda\' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]
>>> results = perplexity.compute(model_id=\'gpt2\',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
78.22
>>> print(round(results["perplexities"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = datasets.load_dataset("wikitext",
... "wikitext-2-raw-v1",
... split="test")["text"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!=\'\']
>>> results = perplexity.compute(model_id=\'gpt2\',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
60.35
>>> print(round(results["perplexities"][0], 2))
81.12
'''
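# --- Editor's worked example of the definition above: perplexity is the
# exponentiated mean negative log-likelihood. For per-token probabilities
# (0.25, 0.25, 0.5), mean NLL = -(ln 0.25 + ln 0.25 + ln 0.5) / 3 and
# PPL = exp(mean NLL) ~= 3.17. A framework-free check, independent of the metric:
import math as _math

_probs = [0.25, 0.25, 0.5]
_mean_nll = -sum(_math.log(p) for p in _probs) / len(_probs)
assert round(_math.exp(_mean_nll), 2) == 3.17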
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case ( datasets.Metric ):
'''simple docstring'''
def _lowercase ( self : str ) -> str:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''input_texts''': datasets.Value('''string''' ),
} ) , reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] , )
def _lowercase ( self : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Any = 16 , lowerCAmelCase_ : List[Any] = True , lowerCAmelCase_ : List[Any]=None ) -> Any:
"""simple docstring"""
if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu, cpu or cuda."
if device == "gpu":
SCREAMING_SNAKE_CASE_ = '''cuda'''
else:
SCREAMING_SNAKE_CASE_ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
SCREAMING_SNAKE_CASE_ = AutoModelForCausalLM.from_pretrained(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = model.to(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
SCREAMING_SNAKE_CASE_ = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(__SCREAMING_SNAKE_CASE ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({'''pad_token''': existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
SCREAMING_SNAKE_CASE_ = model.config.max_length - 1
else:
SCREAMING_SNAKE_CASE_ = model.config.max_length
SCREAMING_SNAKE_CASE_ = tokenizer(
__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , return_attention_mask=__SCREAMING_SNAKE_CASE , ).to(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = encodings['''input_ids''']
SCREAMING_SNAKE_CASE_ = encodings['''attention_mask''']
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = CrossEntropyLoss(reduction='''none''' )
for start_index in logging.tqdm(range(0 , len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) ):
SCREAMING_SNAKE_CASE_ = min(start_index + batch_size , len(__SCREAMING_SNAKE_CASE ) )
SCREAMING_SNAKE_CASE_ = encoded_texts[start_index:end_index]
SCREAMING_SNAKE_CASE_ = attn_masks[start_index:end_index]
if add_start_token:
SCREAMING_SNAKE_CASE_ = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
SCREAMING_SNAKE_CASE_ = torch.cat(
[torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(__SCREAMING_SNAKE_CASE ), attn_mask] , dim=1 )
SCREAMING_SNAKE_CASE_ = encoded_batch
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).logits
SCREAMING_SNAKE_CASE_ = out_logits[..., :-1, :].contiguous()
SCREAMING_SNAKE_CASE_ = labels[..., 1:].contiguous()
SCREAMING_SNAKE_CASE_ = attn_mask[..., 1:].contiguous()
SCREAMING_SNAKE_CASE_ = torch.expa(
(loss_fct(shift_logits.transpose(1 , 2 ) , __SCREAMING_SNAKE_CASE ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(__SCREAMING_SNAKE_CASE )}
| 393 |
'''simple docstring'''
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def _UpperCamelCase (_lowerCamelCase : Tuple )-> List[str]:
'''simple docstring'''
__snake_case = int(_lowerCamelCase )
__snake_case , __snake_case , __snake_case = t // 36_00, (t // 60) % 60, t % 60
return f'''{h}:{m:02d}:{s:02d}''' if h != 0 else f'''{m:02d}:{s:02d}'''
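# --- Editor's note: format_time renders a second count as H:MM:SS and drops the
# hour field when it is zero, e.g. 3661 -> "1:01:01" and 75 -> "01:15".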
def _UpperCamelCase (_lowerCamelCase : Any , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str , _lowerCamelCase : int , _lowerCamelCase : List[Any]=3_00 )-> int:
'''simple docstring'''
return f'''
<div>
{prefix}
<progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>
{label}
</div>
'''
def _UpperCamelCase (_lowerCamelCase : int )-> List[Any]:
'''simple docstring'''
__snake_case = '''<table border="1" class="dataframe">\n'''
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += f''' <th>{i}</th>\n'''
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
__snake_case = f'''{elt:.6f}''' if isinstance(_lowerCamelCase , _lowerCamelCase ) else str(_lowerCamelCase )
html_code += f''' <td>{elt}</td>\n'''
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class lowerCAmelCase :
__lowercase : str = 5
__lowercase : Optional[Any] = 0.2
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 300 , ) -> List[Any]:
'''simple docstring'''
__snake_case = total
__snake_case = '''''' if prefix is None else prefix
__snake_case = leave
__snake_case = parent
__snake_case = width
__snake_case = None
__snake_case = None
__snake_case = None
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None ) -> Any:
'''simple docstring'''
__snake_case = value
if comment is not None:
__snake_case = comment
if self.last_value is None:
__snake_case = __snake_case = time.time()
__snake_case = __snake_case = value
__snake_case = __snake_case = None
__snake_case = self.warmup
__snake_case = 1
self.update_bar(__SCREAMING_SNAKE_CASE )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
if self.first_calls > 0:
self.first_calls -= 1
__snake_case = time.time()
__snake_case = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
__snake_case = self.elapsed_time / (value - self.start_value)
else:
__snake_case = None
if value >= self.total:
__snake_case = self.total
__snake_case = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
__snake_case = self.average_time_per_item * (self.total - value)
self.update_bar(__SCREAMING_SNAKE_CASE )
__snake_case = value
__snake_case = current_time
if self.average_time_per_item is None:
__snake_case = 1
else:
__snake_case = max(int(self.update_every / self.average_time_per_item ) , 1 )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) -> List[str]:
'''simple docstring'''
__snake_case = ''' ''' * (len(str(self.total ) ) - len(str(__SCREAMING_SNAKE_CASE ) )) + str(__SCREAMING_SNAKE_CASE )
if self.elapsed_time is None:
__snake_case = F'''[{spaced_value}/{self.total} : < :'''
elif self.predicted_remaining is None:
__snake_case = F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )}'''
else:
__snake_case = (
F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <'''
F''' {format_time(self.predicted_remaining )}'''
)
self.label += F''', {1/self.average_time_per_item:.2f} it/s'''
self.label += "]" if self.comment is None or len(self.comment ) == 0 else F''', {self.comment}]'''
self.display()
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
__snake_case = disp.display(disp.HTML(self.html_code ) , display_id=__SCREAMING_SNAKE_CASE )
else:
self.output.update(disp.HTML(self.html_code ) )
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
if self.parent is None and self.output is not None:
self.output.update(disp.HTML('''''' ) )
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) -> Any:
'''simple docstring'''
super().__init__(__SCREAMING_SNAKE_CASE )
__snake_case = None if column_names is None else [column_names]
__snake_case = None
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
__snake_case = disp.display(disp.HTML(self.html_code ) , display_id=__SCREAMING_SNAKE_CASE )
else:
self.output.update(disp.HTML(self.html_code ) )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
if self.inner_table is None:
__snake_case = [list(values.keys() ), list(values.values() )]
else:
__snake_case = self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(__SCREAMING_SNAKE_CASE )
__snake_case = columns
self.inner_table.append([values[c] for c in columns] )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=300 ) -> List[str]:
'''simple docstring'''
__snake_case = NotebookProgressBar(__SCREAMING_SNAKE_CASE , prefix=__SCREAMING_SNAKE_CASE , parent=self , width=__SCREAMING_SNAKE_CASE )
return self.child_bar
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = None
self.display()
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self ) -> str:
'''simple docstring'''
__snake_case = None
__snake_case = None
__snake_case = False
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
__snake_case = '''Epoch''' if args.evaluation_strategy == IntervalStrategy.EPOCH else '''Step'''
__snake_case = 0
__snake_case = 0
__snake_case = [self.first_column] + ['''Training Loss''']
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append('''Validation Loss''' )
__snake_case = NotebookTrainingTracker(state.max_steps , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
__snake_case = int(state.epoch ) if int(state.epoch ) == state.epoch else F'''{state.epoch:.2f}'''
self.training_tracker.update(
state.global_step + 1 , comment=F'''Epoch {epoch}/{state.num_train_epochs}''' , force_update=self._force_next_update , )
__snake_case = False
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
if not has_length(__SCREAMING_SNAKE_CASE ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
__snake_case = self.training_tracker.add_child(len(__SCREAMING_SNAKE_CASE ) )
else:
__snake_case = NotebookProgressBar(len(__SCREAMING_SNAKE_CASE ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
if self.prediction_bar is not None:
self.prediction_bar.close()
__snake_case = None
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
__snake_case = {'''Training Loss''': logs['''loss''']}
            # First column is necessarily Step since we're not in epoch eval strategy
__snake_case = state.global_step
self.training_tracker.write_line(__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
if self.training_tracker is not None:
__snake_case = {'''Training Loss''': '''No log''', '''Validation Loss''': '''No log'''}
for log in reversed(state.log_history ):
if "loss" in log:
__snake_case = log['''loss''']
break
if self.first_column == "Epoch":
__snake_case = int(state.epoch )
else:
__snake_case = state.global_step
__snake_case = '''eval'''
for k in metrics:
if k.endswith('''_loss''' ):
__snake_case = re.sub(r'''\_loss$''' , '''''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop('''total_flos''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop('''epoch''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop(F'''{metric_key_prefix}_runtime''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop(F'''{metric_key_prefix}_samples_per_second''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop(F'''{metric_key_prefix}_steps_per_second''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop(F'''{metric_key_prefix}_jit_compilation_time''' , __SCREAMING_SNAKE_CASE )
for k, v in metrics.items():
if k == F'''{metric_key_prefix}_loss''':
__snake_case = v
else:
__snake_case = k.split('''_''' )
__snake_case = ''' '''.join([part.capitalize() for part in splits[1:]] )
__snake_case = v
self.training_tracker.write_line(__SCREAMING_SNAKE_CASE )
self.training_tracker.remove_child()
__snake_case = None
# Evaluation takes a long time so we should force the next update.
__snake_case = True
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
self.training_tracker.update(
state.global_step , comment=F'''Epoch {int(state.epoch )}/{state.num_train_epochs}''' , force_update=__SCREAMING_SNAKE_CASE )
__snake_case = None
| 24 | 0 |
"""simple docstring"""
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def UpperCAmelCase ( _lowercase : Optional[int] ) -> Any:
"""simple docstring"""
return getitem, k
def UpperCAmelCase ( _lowercase : List[str] , _lowercase : int ) -> Union[str, Any]:
"""simple docstring"""
return setitem, k, v
def UpperCAmelCase ( _lowercase : str ) -> Any:
"""simple docstring"""
return delitem, k
def UpperCAmelCase ( _lowercase : Optional[int] , _lowercase : str , *_lowercase : Tuple ) -> List[str]:
"""simple docstring"""
try:
return fun(_lowerCamelCase , *_lowerCamelCase ), None
except Exception as e:
return None, e
lowercase_ = (
_set('key_a', 'val_a'),
_set('key_b', 'val_b'),
)
lowercase_ = [
_set('key_a', 'val_a'),
_set('key_a', 'val_b'),
]
lowercase_ = [
_set('key_a', 'val_a'),
_set('key_b', 'val_b'),
_del('key_a'),
_del('key_b'),
_set('key_a', 'val_a'),
_del('key_a'),
]
lowercase_ = [
_get('key_a'),
_del('key_a'),
_set('key_a', 'val_a'),
_del('key_a'),
_del('key_a'),
_get('key_a'),
]
lowercase_ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
lowercase_ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('key_a', 'val_b'),
]
@pytest.mark.parametrize(
'''operations''' , (
pytest.param(_add_items , id='''add items''' ),
pytest.param(_overwrite_items , id='''overwrite items''' ),
pytest.param(_delete_items , id='''delete items''' ),
pytest.param(_access_absent_items , id='''access absent items''' ),
pytest.param(_add_with_resize_up , id='''add with resize up''' ),
pytest.param(_add_with_resize_down , id='''add with resize down''' ),
) , )
def UpperCAmelCase ( _lowercase : Any ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase_ = HashMap(initial_block_size=4 )
lowerCAmelCase_ = {}
for _, (fun, *args) in enumerate(_lowerCamelCase ):
lowerCAmelCase_ , lowerCAmelCase_ = _run_operation(_lowerCamelCase , _lowerCamelCase , *_lowerCamelCase )
lowerCAmelCase_ , lowerCAmelCase_ = _run_operation(_lowerCamelCase , _lowerCamelCase , *_lowerCamelCase )
assert my_res == py_res
assert str(_lowerCamelCase ) == str(_lowerCamelCase )
assert set(_lowerCamelCase ) == set(_lowerCamelCase )
assert len(_lowerCamelCase ) == len(_lowerCamelCase )
assert set(my.items() ) == set(py.items() )
def UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
def is_public(_lowercase : str ) -> bool:
return not name.startswith('''_''' )
lowerCAmelCase_ = {name for name in dir({} ) if is_public(_lowerCamelCase )}
lowerCAmelCase_ = {name for name in dir(HashMap() ) if is_public(_lowerCamelCase )}
    assert dict_public_names > hash_public_names
| 552 |
'''simple docstring'''
def _UpperCamelCase (_lowerCamelCase : int )-> int:
'''simple docstring'''
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
__snake_case = 1
__snake_case = 1
while repunit:
__snake_case = (10 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
def _UpperCamelCase (_lowerCamelCase : int = 1_00_00_00 )-> int:
'''simple docstring'''
__snake_case = limit - 1
if divisor % 2 == 0:
divisor += 1
while least_divisible_repunit(_lowerCamelCase ) <= limit:
divisor += 2
return divisor
if __name__ == "__main__":
print(F"""{solution() = }""")
| 24 | 0 |
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
| 457 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] , _lowerCamelCase : List[Any] )-> Optional[Any]:
'''simple docstring'''
__snake_case = []
for part_id in partition_order:
__snake_case = df.where(f'''SPARK_PARTITION_ID() = {part_id}''' ).collect()
for row_idx, row in enumerate(_lowerCamelCase ):
expected_row_ids_and_row_dicts.append((f'''{part_id}_{row_idx}''', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Any:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(1_00 ).repartition(1 )
__snake_case = Spark(_lowerCamelCase )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Tuple:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(10 ).repartition(2 )
__snake_case = [1, 0]
__snake_case = _generate_iterable_examples(_lowerCamelCase , _lowerCamelCase ) # Reverse the partitions.
__snake_case = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , _lowerCamelCase )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
__snake_case , __snake_case = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> int:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(10 ).repartition(1 )
__snake_case = SparkExamplesIterable(_lowerCamelCase )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
assert row_id == f'''0_{i}'''
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Union[str, Any]:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('''numpy.random.Generator''' ) as generator_mock:
        __snake_case = lambda x : x.reverse()
__snake_case = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , [2, 1, 0] )
__snake_case = SparkExamplesIterable(_lowerCamelCase ).shuffle_data_sources(_lowerCamelCase )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
__snake_case , __snake_case = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Tuple:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
__snake_case = SparkExamplesIterable(_lowerCamelCase ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
__snake_case = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , [0, 2] )
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
__snake_case , __snake_case = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
__snake_case = SparkExamplesIterable(_lowerCamelCase ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
__snake_case = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , [1, 3] )
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
__snake_case , __snake_case = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Optional[int]:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(1_00 ).repartition(1 )
__snake_case = Spark(_lowerCamelCase )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 1_00
| 24 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ : Optional[Any] = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ : str = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
UpperCamelCase_ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 331 |
'''simple docstring'''
def _UpperCamelCase (_lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int )-> float:
'''simple docstring'''
__snake_case = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
# formula for sum of series
return total
def _UpperCamelCase ()-> str:
'''simple docstring'''
print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
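# --- Editor's worked example of the closed form above: with first_term = 1,
# common_diff = 1, num_of_terms = 10 the series is 1 + 2 + ... + 10, and
# n/2 * (2a + (n-1)d) = 5 * 11 = 55.0.
assert (10 / 2) * (2 * 1 + (10 - 1) * 1) == 55.0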
| 24 | 0 |
"""
Project Euler Problem 551: Sum of digits sequence
https://projecteuler.net/problem=551

Let a(0), a(1), ... be defined by a(0) = 1 and, for n >= 1,
a(n) = a(n-1) + digitsum(a(n-1)), e.g. a(1) = 1 + 1 = 2, a(2) = 2 + 2 = 4.
Find a(10**15).  Digits of the current term are stored least significant
first; jumps over many terms are cached in `memo`.
"""
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict = {}


def next_term(a_i, k, i, n):
    """
    Advance the sequence from term i towards term n, viewing the current term
    as b * 10**k + c.  Returns (diff, dn): the total amount added to the term
    and the number of terms that were skipped.
    """
    # ds_b -> digitsum(b); c -> the low k digits as an integer
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)

        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)


def compute(a_i, k, i, n):
    """Compute terms one at a time until the low k digits overflow (or term n is reached)."""
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    """Add `addend` to the digit list, starting at digit index k; carries ripple upwards."""
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    """Return the n-th term of the sequence, rebuilt from its digit list."""
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
if __name__ == "__main__":
print(F"""{solution() = }""")
| 75 |
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    """Parse a boolean flag from an environment variable, falling back to `default`."""
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)
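# Illustrative usage (not part of the original module): running the suite as
# `RUN_SLOW=yes python -m pytest tests/` makes parse_flag_from_env("RUN_SLOW")
# return True, which disables the `slow` skip decorator defined further below.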
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
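# Illustrative composition of the helpers above (hypothetical test class,
# not from the original module):
#
#     @for_all_test_methods(slow)
#     class MyHeavyTests(unittest.TestCase):
#         @require_faiss
#         def test_index(self):
#             ...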
class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_TIMES_OUT = 0
    CONNECTION_FAILS = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2


@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
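# Illustrative usage (sketch, not from the original module): any HTTP request
# issued inside the block fails fast instead of hitting the network.
#
#     with offline(OfflineSimulationMode.CONNECTION_FAILS):
#         requests.get("https://huggingface.co")  # raises requests.ConnectionError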
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)


@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."


def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()


def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result
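# Illustrative usage (sketch, not from the original module):
#
#     result = execute_subprocess_async(
#         [sys.executable, "-m", "pytest", "-sv", "tests/test_foo.py"],  # hypothetical test file
#         env=os.environ.copy(),
#     )
#     print("\n".join(result.stdout))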
def pytest_xdist_worker_id():
    """Return the numeric id of the current pytest-xdist worker ("gw0" -> 0),
    and 0 when the tests are not distributed with pytest-xdist at all."""
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    """Return a master port unique to this xdist worker, so that concurrent
    torch.distributed test runs do not collide."""
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
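# Illustrative behaviour (sketch): under pytest-xdist, worker "gw3" yields
# pytest_xdist_worker_id() == 3, so get_torch_dist_unique_port() == 29503 and
# each concurrent torch.distributed run can set a distinct MASTER_PORT.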
| 24 | 0 |
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()

        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
| 62 |
"""Count the integer partitions of m, i.e. the number of ways of writing m as
a sum of positive integers where order does not matter."""


def partition(m: int) -> int:
    # memo[n][k]: number of partitions of n using parts of size at most k + 1
    memo = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            n = int(input("Enter a number: ").strip())
            print(partition(n))
        except ValueError:
            print("Please enter a number.")
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print("Please pass a number.")
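# Worked example (illustrative): the partitions of 5 are {5}, {4,1}, {3,2},
# {3,1,1}, {2,2,1}, {2,1,1,1} and {1,1,1,1,1}, so partition(5) == 7.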
| 24 | 0 |
def set_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 1."""
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 0."""
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    """Invert the bit at `position` of `number`."""
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    """Return True if the bit at `position` of `number` is set."""
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    """Return the bit (0 or 1) at `position` of `number`."""
    return int((number & (1 << position)) != 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
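# Worked example (illustrative, not part of the original module): building a
# small permission bitmask with the helpers above.
READ, WRITE, EXECUTE = 0, 1, 2
mask = set_bit(0, READ)       # 0b001
mask = set_bit(mask, WRITE)   # 0b011
mask = clear_bit(mask, READ)  # 0b010
assert is_bit_set(mask, WRITE) and not is_bit_set(mask, EXECUTE)
assert get_bit(flip_bit(mask, EXECUTE), EXECUTE) == 1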
| 36 |
import argparse
import os
import re
PATH_TO_AUTO_MODULE = "src/transformers/models/auto"

# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')


def sort_auto_mapping(fname, overwrite: bool = False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda block: _re_identifier.search(block).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite: bool = False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
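# Illustrative invocations (assuming the script lives at utils/sort_auto_mappings.py,
# as in the transformers repository):
#   python utils/sort_auto_mappings.py                # sort the mappings in place
#   python utils/sort_auto_mappings.py --check_only   # only report unsorted files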
| 24 | 0 |
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class CvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))


class CvtModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size, num_labels=self.num_labels, num_channels=self.num_channels, embed_dim=self.embed_dim, num_heads=self.num_heads, patch_sizes=self.patch_sizes, patch_padding=self.patch_padding, patch_stride=self.patch_stride, stride_kv=self.stride_kv, depth=self.depth, cls_token=self.cls_token, attention_drop_rate=self.attention_drop_rate, initializer_range=self.initializer_range, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = CvtModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = CvtForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class CvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = CvtModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Cvt does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Cvt does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Cvt does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class CvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9285, 0.9015, -0.3150]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 97 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
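# Illustrative usage (sketch, not from the original module): deprecating a
# keyword argument while still honoring it when the caller passes it.
#
#     def __call__(self, prompt, **kwargs):
#         scale = deprecate("scale", "0.30.0", "Use `guidance_scale` instead.", take_from=kwargs)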
| 24 | 0 |
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
logger = logging.getLogger(__name__)


def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
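# Illustrative invocation (paths are placeholders):
#   python scripts/binarized_data.py --file_path data/dump.txt \
#       --tokenizer_type bert --tokenizer_name bert-base-uncased --dump_file data/binarized_text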
if __name__ == "__main__":
    main()
| 458 |
"""Convert EfficientFormer checkpoints from the original repository (https://github.com/snap-research/EfficientFormer)."""
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("norm1", "layernorm1")
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("norm2", "layernorm2")
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("fc1", "linear_in")
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("fc2", "linear_out")

            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name


def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val
    return checkpoint


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


def convert_efficientformer_checkpoint(
    checkpoint_path: Path, efficientformer_config_file: Path, pytorch_dump_path: Path, push_to_hub: bool
):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size}, crop_size={"height": crop_size, "width": crop_size}, resample=pillow_resamplings["bicubic"], )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)

    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")

        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}", commit_message="Add model", use_temp_dir=True, )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}", commit_message="Add image processor", use_temp_dir=True, )
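# Illustrative rename (traced through rename_key above, so a sketch rather than
# a guarantee): "network.0.0.norm1.weight" should map to
# "efficientformer.encoder.intermediate_stages.0.meta4D_layers.blocks.0.batchnorm_before.weight".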
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''',
default=None,
type=str,
required=True,
help='''Path to EfficientFormer pytorch checkpoint.''',
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for EfficientFormer model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 24 | 0 |
"""simple docstring"""
from __future__ import annotations
def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    """
    Divide `number_of_bytes` into `partitions` byte ranges, e.g. for assigning
    one slice to each worker thread of a multi-threaded download.

    >>> allocation_num(16647, 4)
    ['1-4161', '4162-8322', '8323-12483', '12484-16647']
    """
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
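# Worked example (illustrative): splitting a 100-byte payload across 3 workers
# gives allocation_num(100, 3) == ['1-33', '34-66', '67-100']; the final range
# absorbs the remainder.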
| 7 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }


@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 24 | 0 |
"""Project Euler Problem 43 -- Sub-string divisibility: https://projecteuler.net/problem=43"""
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    """Check the Problem 43 property: for a pandigital number with digits
    d1..d10, the substrings d2d3d4, ..., d8d9d10 must be divisible by
    2, 3, 5, 7, 11, 13 and 17 respectively."""
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Sum all 0 to n-1 pandigital numbers with the substring divisibility property."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
if __name__ == "__main__":
print(f'''{solution() = }''')
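# Worked example (from the problem statement): 1406357289 qualifies because
# 406 % 2 == 0, 063 % 3 == 0, 635 % 5 == 0, 357 % 7 == 0, 572 % 11 == 0,
# 728 % 13 == 0 and 289 % 17 == 0, so it is among the numbers summed by solution(10).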
| 120 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    """A dataset reader that reads from a Spark DataFrame."""

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, **kwargs
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs)

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(download_mode=download_mode, file_format=self._file_format)
        return self.builder.as_dataset(split=self.split)
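# Illustrative usage (sketch, assuming an active SparkSession named `spark`):
#
#     df = spark.createDataFrame([("hello",), ("world",)], ["text"])
#     ds = SparkDatasetReader(df, streaming=False).read()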
| 24 | 0 |
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}

test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Depth-first search returning vertices reachable from `vert` in order of
    finishing time (the first pass of Kosaraju's algorithm)."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Depth-first search on the reversed graph; every vertex reached from
    `vert` belongs to the same strongly connected component."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Return the strongly connected components of `graph` (Kosaraju's algorithm)."""
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}

    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]

    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)

    return components_list
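# Worked example (illustrative): in test_graph_1 above, 0 -> 2 -> 1 -> 0 forms a
# cycle while 3 and 4 only reach forward, so
# strongly_connected_components(test_graph_1) == [[0, 1, 2], [3], [4]].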
| 393 |
"""Tokenization classes for LED."""
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. include the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
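

# Illustrative sketch of the custom `global_attention_mask` padding above (the
# `tok.pad` call and the mask layout are assumptions for illustration, not taken
# from this file):
#   tok = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
#   enc = tok(["short text", "a slightly longer input text"])
#   enc["global_attention_mask"] = [[1] + [0] * (len(ids) - 1) for ids in enc["input_ids"]]
#   batch = tok.pad(enc, padding=True)  # shorter rows of the global mask are extended with -1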
| 24 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
logger = logging.get_logger(__name__)


class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
'''simple docstring'''
from collections import deque
def tarjan(g):
    """Return the strongly connected components of the adjacency-list graph `g`."""
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)

    return components


def create_graph(n, edges):
    g = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g


if __name__ == "__main__":
    # Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)

    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
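    # Additional illustrative check (added): a single 3-cycle is one component.
    assert [[2, 1, 0]] == tarjan(create_graph(3, [(0, 1), (1, 2), (2, 0)]))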
| 24 | 0 |
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("3.8"):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f'If set, {key} must be yes or no.')
    return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)

# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
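

# Illustrative use of `offline` in a test body (hypothetical, not from the
# original file):
#   def test_network_calls_fail_offline():
#       with offline(OfflineSimulationMode.CONNECTION_FAILS):
#           with pytest.raises(requests.exceptions.ConnectionError):
#               requests.get("https://huggingface.co")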
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()
def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result
def pytest_xdist_worker_id():
    worker_id_string = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker_id_string = re.sub(r"^gw", "", worker_id_string, 0, re.M)
    return int(worker_id_string)


def get_torch_dist_unique_port():
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
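

# Example (illustrative): run a short command and capture its output lines.
#   result = execute_subprocess_async([sys.executable, "-c", "print('hello')"])
#   assert result.stdout == ["hello"]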
| 457 |
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
__snake_case = {'''input_ids''': [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        expected_encoding = __snake_case  # the reference encoding defined above, kept verbatim

        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
| 24 | 0 |
'''simple docstring'''
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    """Build an n-qubit QFT circuit, measure it, and return the simulated counts."""
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be a integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")

    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits

    for i in range(number_of_qubits):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10000)

    return job.result().get_counts(quantum_circuit)
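

# Note (added): the QFT maps |0...0> to an equal superposition of all basis
# states, so for the default 3 qubits the 10000 shots below should be spread
# roughly uniformly over the 8 outcomes (~1250 counts each).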
if __name__ == "__main__":
print(
F"Total count for quantum fourier transform state is: \\n {quantum_fourier_transform(3)}"
)
| 331 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000

        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 24 | 0 |
'''simple docstring'''
def infix_2_postfix(infix):
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack

        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix):
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("

    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
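
# Worked example (added for illustration): for the input "a+b*c" the helpers
# above print their conversion tables and return:
#   infix_2_postfix("a+b*c")  # -> "abc*+"
#   infix_2_prefix("a+b*c")   # -> "+a*bc"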
| 75 |
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    return image
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blipa_config(model_name, eos_token_id):
    image_size = 364 if "coco" in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = BlipaConfig(vision_config=vision_config, text_config=text_config)

    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Copy/paste/tweak the original model's weights into the Transformers design."""
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blipa_config(model_name, eos_token_id=eos_token_id)

    hf_model = BlipaForConditionalGeneration(config).eval()
    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }

    name, type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = BlipaProcessor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values, original_pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")

    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)

    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
    args = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 24 | 0 |
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    """Convert a roman numeral string into an integer."""
    total_value = 0

    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]

    return total_value


def generate_roman_numerals(num: int) -> str:
    """Generate the minimal roman numeral string for an integer."""
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Project Euler 89: total characters saved by rewriting each numeral minimally."""
    savings = 0

    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()

    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shorter = generate_roman_numerals(num)
        savings += len(original) - len(shorter)

    return savings
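

# Example (illustrative): parse_roman_numerals("XIIIIII") == 16 and
# generate_roman_numerals(16) == "XVI" -- a saving of 4 characters.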
if __name__ == "__main__":
print(F"""{solution() = }""")
| 62 |
'''simple docstring'''
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
UpperCAmelCase_ : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
def _UpperCamelCase (_lowerCamelCase : Union[List, PIL.Image.Image, torch.Tensor] )-> Optional[Any]:
'''simple docstring'''
warnings.warn(
'''The preprocess method is deprecated and will be removed in a future version. Please'''
''' use VaeImageProcessor.preprocess instead''' , _lowerCamelCase , )
if isinstance(_lowerCamelCase , torch.Tensor ):
return image
elif isinstance(_lowerCamelCase , PIL.Image.Image ):
__snake_case = [image]
if isinstance(image[0] , PIL.Image.Image ):
__snake_case , __snake_case = image[0].size
__snake_case , __snake_case = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
__snake_case = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
__snake_case = np.concatenate(_lowerCamelCase , axis=0 )
__snake_case = np.array(_lowerCamelCase ).astype(np.floataa ) / 255.0
__snake_case = image.transpose(0 , 3 , 1 , 2 )
__snake_case = 2.0 * image - 1.0
__snake_case = torch.from_numpy(_lowerCamelCase )
elif isinstance(image[0] , torch.Tensor ):
__snake_case = torch.cat(_lowerCamelCase , dim=0 )
return image
def _UpperCamelCase (_lowerCamelCase : Union[List, PIL.Image.Image, torch.Tensor] )-> Optional[Any]:
'''simple docstring'''
if isinstance(_lowerCamelCase , torch.Tensor ):
return mask
elif isinstance(_lowerCamelCase , PIL.Image.Image ):
__snake_case = [mask]
if isinstance(mask[0] , PIL.Image.Image ):
__snake_case , __snake_case = mask[0].size
__snake_case , __snake_case = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
__snake_case = [np.array(m.convert('''L''' ).resize((w, h) , resample=PIL_INTERPOLATION['''nearest'''] ) )[None, :] for m in mask]
__snake_case = np.concatenate(_lowerCamelCase , axis=0 )
__snake_case = mask.astype(np.floataa ) / 255.0
__snake_case = 0
__snake_case = 1
__snake_case = torch.from_numpy(_lowerCamelCase )
elif isinstance(mask[0] , torch.Tensor ):
__snake_case = torch.cat(_lowerCamelCase , dim=0 )
return mask
class RePaintPipeline(DiffusionPipeline):
    unet: UNetaDModel
    scheduler: RePaintScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)
    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image],
        mask_image: Union[torch.Tensor, PIL.Image.Image],
        num_inference_steps: int = 250,
        eta: float = 0.0,
        jump_length: int = 10,
        jump_n_sample: int = 10,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        original_image = image

        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)

        batch_size = original_image.shape[0]

        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta

        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample

            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
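

# Illustrative usage sketch (the checkpoint name and loading pattern are
# assumptions for illustration, not taken from this file):
#   scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
#   pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)
#   result = pipe(image=init_image, mask_image=mask, num_inference_steps=250)
#   inpainted = result.images[0]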
| 24 | 0 |
from __future__ import annotations
def print_distance(distance: list[float], src: int) -> None:
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int) -> bool:
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])

            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowercase : Dict = int(input('''Enter number of vertices: ''').strip())
__lowercase : List[str] = int(input('''Enter number of edges: ''').strip())
__lowercase : list[dict[str, int]] = [{} for _ in range(E)]
for i in range(E):
print('''Edge ''', i + 1)
__lowercase : int = (
int(x)
for x in input('''Enter source, destination, weight: ''').strip().split(''' ''')
)
__lowercase : Optional[Any] = {'''src''': src, '''dst''': dest, '''weight''': weight}
__lowercase : Optional[Any] = int(input('''\nEnter shortest path source:''').strip())
__lowercase : int = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
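# A small self-contained check (hypothetical edge list; assumes the functions keep
# their original names, as used in the calls above):
#
#   g = [{"src": 0, "dst": 1, "weight": 4}, {"src": 0, "dst": 2, "weight": 1},
#        {"src": 2, "dst": 1, "weight": 2}]
#   assert bellman_ford(g, 3, 3, 0) == [0.0, 3.0, 1.0]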
| 36 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
UpperCAmelCase_ : str = logging.get_logger(__name__)
@add_end_docstrings(__lowerCAmelCase)
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
return super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , **__SCREAMING_SNAKE_CASE ) -> Tuple:
'''simple docstring'''
__snake_case = {}
if "candidate_labels" in kwargs:
__snake_case = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
__snake_case = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="This is a photo of {}." ) -> Optional[Any]:
'''simple docstring'''
__snake_case = load_image(__SCREAMING_SNAKE_CASE )
__snake_case = self.image_processor(images=[image] , return_tensors=self.framework )
__snake_case = candidate_labels
__snake_case = [hypothesis_template.format(__SCREAMING_SNAKE_CASE ) for x in candidate_labels]
__snake_case = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=self.framework , padding=__SCREAMING_SNAKE_CASE )
__snake_case = [text_inputs]
return inputs
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
__snake_case = model_inputs.pop('''candidate_labels''' )
__snake_case = model_inputs.pop('''text_inputs''' )
if isinstance(text_inputs[0] , __SCREAMING_SNAKE_CASE ):
__snake_case = text_inputs[0]
else:
# Batching case.
__snake_case = text_inputs[0][0]
__snake_case = self.model(**__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
__snake_case = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_image,
}
return model_outputs
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
__snake_case = model_outputs.pop('''candidate_labels''' )
__snake_case = model_outputs['''logits'''][0]
if self.framework == "pt":
__snake_case = logits.softmax(dim=-1 ).squeeze(-1 )
__snake_case = probs.tolist()
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__snake_case = [scores]
elif self.framework == "tf":
__snake_case = stable_softmax(__SCREAMING_SNAKE_CASE , axis=-1 )
__snake_case = probs.numpy().tolist()
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
__snake_case = [
{'''score''': score, '''label''': candidate_label}
for score, candidate_label in sorted(zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , key=lambda __SCREAMING_SNAKE_CASE : -x[0] )
]
return result
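# Typical entry point is the pipeline factory rather than direct instantiation
# (a sketch; the checkpoint name is illustrative):
#
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   classifier("cat.png", candidate_labels=["cat", "dog"])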
| 24 | 0 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def a ( *snake_case__: str , snake_case__: Optional[Union[Dict, Any]] = None , snake_case__: List[Any]=True , snake_case__: str=2 ):
'''simple docstring'''
from .. import __version__
lowercase_ = take_from
lowercase_ = ()
if not isinstance(args[0] , _lowerCamelCase ):
lowercase_ = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(_lowerCamelCase ).base_version ) >= version.parse(_lowerCamelCase ):
raise ValueError(
F'''The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''''
F''' version {__version__} is >= {version_name}''' )
lowercase_ = None
if isinstance(_lowerCamelCase , _lowerCamelCase ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(_lowerCamelCase ),)
lowercase_ = F'''The `{attribute}` argument is deprecated and will be removed in version {version_name}.'''
elif hasattr(_lowerCamelCase , _lowerCamelCase ):
values += (getattr(_lowerCamelCase , _lowerCamelCase ),)
lowercase_ = F'''The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'''
elif deprecated_kwargs is None:
lowercase_ = F'''`{attribute}` is deprecated and will be removed in version {version_name}.'''
if warning is not None:
lowercase_ = warning + ''' ''' if standard_warn else ''''''
warnings.warn(warning + message , _lowerCamelCase , stacklevel=_lowerCamelCase )
if isinstance(_lowerCamelCase , _lowerCamelCase ) and len(_lowerCamelCase ) > 0:
lowercase_ = inspect.getouterframes(inspect.currentframe() )[1]
lowercase_ = call_frame.filename
lowercase_ = call_frame.lineno
lowercase_ = call_frame.function
lowercase_ , lowercase_ = next(iter(deprecated_kwargs.items() ) )
raise TypeError(F'''{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`''' )
if len(_lowerCamelCase ) == 0:
return
elif len(_lowerCamelCase ) == 1:
return values[0]
return values
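# Example call (a sketch; `deprecate` is the usual name of this helper in diffusers,
# and the version string is illustrative). Each positional argument is an
# (attribute, removal_version, message) triple:
#
#   scale = deprecate(("scale", "1.0.0", "Use `guidance_scale` instead."), take_from=kwargs)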
| 97 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCAmelCase_ : List[str] = {
'''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : int = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
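# With this layout, `from transformers.models.gpt_bigcode import GPTBigCodeModel`
# defers the torch-dependent import until first attribute access (via _LazyModule).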
| 24 | 0 |
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def __magic_name__ ( lowercase , lowercase , lowercase ) -> Any:
"""simple docstring"""
if gpta_config_file == "":
lowercase_ : Any = GPTaConfig()
else:
lowercase_ : Optional[Any] = GPTaConfig.from_json_file(_lowerCamelCase )
lowercase_ : Tuple = GPTaModel(_lowerCamelCase )
# Load weights from numpy
load_tf_weights_in_gpta(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Save pytorch-model
lowercase_ : List[str] = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
lowercase_ : Optional[Any] = pytorch_dump_folder_path + """/""" + CONFIG_NAME
print(f"""Save PyTorch model to {pytorch_weights_dump_path}""" )
torch.save(model.state_dict() , _lowerCamelCase )
print(f"""Save configuration file to {pytorch_config_dump_path}""" )
with open(_lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--gpt2_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--gpt2_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
UpperCAmelCase_ = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
| 458 |
'''simple docstring'''
def _UpperCamelCase (_lowerCamelCase : int )-> int:
'''simple docstring'''
__snake_case = abs(_lowerCamelCase )
__snake_case = 0
while n > 0:
res += n % 10
n //= 10
return res
def _UpperCamelCase (_lowerCamelCase : int )-> int:
'''simple docstring'''
__snake_case = abs(_lowerCamelCase )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def _UpperCamelCase (_lowerCamelCase : int )-> int:
'''simple docstring'''
return sum(int(_lowerCamelCase ) for c in str(abs(_lowerCamelCase ) ) )
def _UpperCamelCase ()-> None:
'''simple docstring'''
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(_lowerCamelCase : Callable , _lowerCamelCase : int ) -> None:
__snake_case = f'''{func.__name__}({value})'''
__snake_case = timeit(f'''__main__.{call}''' , setup='''import __main__''' )
print(f'''{call:56} = {func(_lowerCamelCase )} -- {timing:.4f} seconds''' )
for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(_lowerCamelCase , _lowerCamelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
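# Quick sanity check (uses the first benchmark constant above; assumes the functions
# keep their original names):
#
#   assert sum_of_digits(262144) == sum_of_digits_compact(262144) == 19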
| 24 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
a = logging.get_logger(__name__)
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : str = ['''input_features''', '''attention_mask''']
def __init__( self : Optional[int] , _UpperCAmelCase : Any=80 , _UpperCAmelCase : int=16_000 , _UpperCAmelCase : Optional[int]=80 , _UpperCAmelCase : Any=0.0 , _UpperCAmelCase : Tuple=True , _UpperCAmelCase : str=True , _UpperCAmelCase : Union[str, Any]=True , **_UpperCAmelCase : List[Any] , ):
super().__init__(feature_size=__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , padding_value=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
_A = num_mel_bins
_A = do_ceptral_normalize
_A = normalize_means
_A = normalize_vars
_A = True
def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : Any , ):
_A = waveform * (2**15) # Kaldi compliance: 16-bit signed integers
_A = torch.from_numpy(__SCREAMING_SNAKE_CASE ).unsqueeze(0 )
_A = ta_kaldi.fbank(__SCREAMING_SNAKE_CASE , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
return features.numpy()
@staticmethod
def lowerCAmelCase_ ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str = True , _UpperCAmelCase : List[Any] = True , _UpperCAmelCase : List[Any] = 0.0 , ):
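# Utterance-level cepstral mean and variance normalization (CMVN): subtract the
# per-feature mean and divide by the per-feature standard deviation, computed
# over the first `input_length` (non-padded) frames.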
if normalize_means:
_A = x[:input_length].mean(axis=0 )
_A = np.subtract(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if normalize_vars:
_A = x[:input_length].std(axis=0 )
_A = np.divide(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if input_length < x.shape[0]:
_A = padding_value
# make sure array is in float32
_A = x.astype(np.floataa )
return x
def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : Tuple = None ):
_A = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.normalize_means , self.normalize_vars , self.padding_value )
for x, n in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
]
def __call__( self : Any , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[Any] = False , _UpperCAmelCase : Optional[Any] = None , _UpperCAmelCase : List[str] = False , _UpperCAmelCase : int = None , _UpperCAmelCase : Any = None , _UpperCAmelCase : Tuple = None , _UpperCAmelCase : Tuple = None , **_UpperCAmelCase : Any , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
F''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
F''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
_A = isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
_A = is_batched_numpy or (
isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_A = [np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ):
_A = np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa )
elif isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_A = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_A = [raw_speech]
# extract fbank features
_A = [self._extract_fbank_features(__SCREAMING_SNAKE_CASE ) for waveform in raw_speech]
# convert into correct format for padding
_A = BatchFeature({'input_features': features} )
_A = self.pad(
__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
# make sure list is in array format
_A = padded_inputs.get('input_features' )
if isinstance(input_features[0] , __SCREAMING_SNAKE_CASE ):
_A = [np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in input_features]
_A = padded_inputs.get('attention_mask' )
if attention_mask is not None:
_A = [np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
_A = (
np.array(__SCREAMING_SNAKE_CASE , dtype=np.intaa )
if self._get_padding_strategies(__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE ) is not PaddingStrategy.DO_NOT_PAD
else None
)
_A = self.normalize(
padded_inputs['input_features'] , attention_mask=__SCREAMING_SNAKE_CASE )
if return_tensors is not None:
_A = padded_inputs.convert_to_tensors(__SCREAMING_SNAKE_CASE )
return padded_inputs
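# A minimal usage sketch (an assumption: this is the Speech2Text feature extractor;
# the checkpoint name is illustrative):
#
#   from transformers import Speech2TextFeatureExtractor
#   fe = Speech2TextFeatureExtractor.from_pretrained("facebook/s2t-small-librispeech-asr")
#   inputs = fe(raw_speech, sampling_rate=16_000, return_tensors="pt")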
| 7 |
'''simple docstring'''
def _UpperCamelCase (_lowerCamelCase : Optional[Any] )-> Dict:
'''simple docstring'''
__snake_case = []
__snake_case = []
__snake_case = {
'''^''': 3,
'''*''': 2,
'''/''': 2,
'''%''': 2,
'''+''': 1,
'''-''': 1,
} # Priority of each operator
__snake_case = len(_lowerCamelCase ) if (len(_lowerCamelCase ) > 7) else 7
# Print table header for output
print(
'''Symbol'''.center(8 ) , '''Stack'''.center(_lowerCamelCase ) , '''Postfix'''.center(_lowerCamelCase ) , sep=''' | ''' , )
print('''-''' * (print_width * 3 + 7) )
for x in infix:
if x.isalpha() or x.isdigit():
post_fix.append(_lowerCamelCase ) # if x is alphanumeric (letter or digit), add it to Postfix
elif x == "(":
stack.append(_lowerCamelCase ) # if x is "(" push to Stack
elif x == ")": # if x is ")" pop stack until "(" is encountered
while stack[-1] != "(":
post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
stack.pop()
else:
if len(_lowerCamelCase ) == 0:
stack.append(_lowerCamelCase ) # If stack is empty, push x to stack
else: # while priority of x is not > priority of element in the stack
while len(_lowerCamelCase ) > 0 and priority[x] <= priority[stack[-1]]:
post_fix.append(stack.pop() ) # pop stack & add to Postfix
stack.append(_lowerCamelCase ) # push x to stack
print(
x.center(8 ) , (''''''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , (''''''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , sep=''' | ''' , ) # Output in tabular format
while len(_lowerCamelCase ) > 0: # while stack is not empty
post_fix.append(stack.pop() ) # pop stack & add to Postfix
print(
''' '''.center(8 ) , (''''''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , (''''''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , sep=''' | ''' , ) # Output in tabular format
return "".join(_lowerCamelCase ) # return Postfix as str
def _UpperCamelCase (_lowerCamelCase : Optional[Any] )-> str:
'''simple docstring'''
__snake_case = list(infix[::-1] ) # reverse the infix equation
for i in range(len(_lowerCamelCase ) ):
if infix[i] == "(":
__snake_case = ''')''' # change "(" to ")"
elif infix[i] == ")":
__snake_case = '''(''' # change ")" to "("
return (infix_2_postfix(''''''.join(_lowerCamelCase ) ))[
::-1
] # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
UpperCAmelCase_ : Dict = input('''\nEnter an Infix Equation = ''') # Input an Infix equation
UpperCAmelCase_ : Optional[Any] = ''''''.join(Infix.split()) # Remove spaces from the input
print('''\n\t''', Infix, '''(Infix) -> ''', infix_2_prefix(Infix), '''(Prefix)''')
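# Worked example (assumes the functions keep their original names, as used above):
#
#   infix_2_postfix("a+b*c")  ->  "abc*+"
#   infix_2_prefix("a+b*c")   ->  "+a*bc"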
| 24 | 0 |
'''simple docstring'''
import random
def UpperCAmelCase_ ( A , A , A ):
'''simple docstring'''
_a : Tuple = a[left_index]
_a : Optional[Any] = left_index + 1
for j in range(left_index + 1 , _lowerCamelCase ):
if a[j] < pivot:
_a , _a : int = a[i], a[j]
i += 1
_a , _a : List[Any] = a[i - 1], a[left_index]
return i - 1
def UpperCAmelCase_ ( A , A , A ):
'''simple docstring'''
if left < right:
_a : Dict = random.randint(_lowerCamelCase , right - 1 )
_a , _a : Tuple = (
a[left],
a[pivot],
) # switches the pivot with the leftmost bound
_a : Union[str, Any] = partition(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
quick_sort_random(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # recursive quicksort to the left of the pivot point
quick_sort_random(
_lowerCamelCase , pivot_index + 1 , _lowerCamelCase ) # recursive quicksort to the right of the pivot point
def UpperCAmelCase_ ( ):
'''simple docstring'''
_a : Union[str, Any] = input('Enter numbers separated by a comma:\n' ).strip()
_a : Optional[Any] = [int(_lowerCamelCase ) for item in user_input.split(',' )]
quick_sort_random(_lowerCamelCase , 0 , len(_lowerCamelCase ) )
print(_lowerCamelCase )
if __name__ == "__main__":
main()
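# Example (assumes the functions keep their original names; the upper bound is
# exclusive, matching the call in main()):
#
#   data = [5, 2, 9, 1]
#   quick_sort_random(data, 0, len(data))   # data -> [1, 2, 5, 9]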
| 120 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase_ : int = logging.get_logger(__name__)
UpperCAmelCase_ : Dict = {
'''microsoft/swin-tiny-patch4-window7-224''': (
'''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase):
__lowercase : List[Any] = '''swin'''
__lowercase : str = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , __SCREAMING_SNAKE_CASE=224 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=96 , __SCREAMING_SNAKE_CASE=[2, 2, 6, 2] , __SCREAMING_SNAKE_CASE=[3, 6, 12, 24] , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=4.0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1E-5 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ) -> int:
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE )
__snake_case = image_size
__snake_case = patch_size
__snake_case = num_channels
__snake_case = embed_dim
__snake_case = depths
__snake_case = len(__SCREAMING_SNAKE_CASE )
__snake_case = num_heads
__snake_case = window_size
__snake_case = mlp_ratio
__snake_case = qkv_bias
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = drop_path_rate
__snake_case = hidden_act
__snake_case = use_absolute_embeddings
__snake_case = layer_norm_eps
__snake_case = initializer_range
__snake_case = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__snake_case = int(embed_dim * 2 ** (len(__SCREAMING_SNAKE_CASE ) - 1) )
__snake_case = ['''stem'''] + [F'''stage{idx}''' for idx in range(1 , len(__SCREAMING_SNAKE_CASE ) + 1 )]
__snake_case , __snake_case = get_aligned_output_features_output_indices(
out_features=__SCREAMING_SNAKE_CASE , out_indices=__SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : Optional[int] = version.parse('''1.11''')
@property
def lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowerCAmelCase ( self ) -> float:
'''simple docstring'''
return 1E-4
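# Instantiating the config and a model from it (a sketch following the usual
# transformers API):
#
#   from transformers import SwinConfig, SwinModel
#   config = SwinConfig(image_size=224, embed_dim=96, depths=[2, 2, 6, 2])
#   model = SwinModel(config)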
| 24 | 0 |
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case ( __lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = GPTSanJapaneseTokenizer
UpperCAmelCase : int = False
UpperCAmelCase : List[str] = {'''do_clean_text''': False, '''add_prefix_space''': False}
def _lowercase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
super().setUp()
# fmt: off
SCREAMING_SNAKE_CASE_ = ['''こん''', '''こんに''', '''にちは''', '''ばんは''', '''世界,㔺界''', '''、''', '''。''', '''<BR>''', '''<SP>''', '''<TAB>''', '''<URL>''', '''<EMAIL>''', '''<TEL>''', '''<DATE>''', '''<PRICE>''', '''<BLOCK>''', '''<KIGOU>''', '''<U2000U2BFF>''', '''<|emoji1|>''', '''<unk>''', '''<|bagoftoken|>''', '''<|endoftext|>''']
# fmt: on
SCREAMING_SNAKE_CASE_ = {'''emoji''': {'''\ud83d\ude00''': '''<|emoji1|>'''}, '''emoji_inv''': {'''<|emoji1|>''': '''\ud83d\ude00'''}} # 😀
SCREAMING_SNAKE_CASE_ = {'''unk_token''': '''<unk>'''}
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''emoji_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
with open(self.emoji_file , '''w''' ) as emoji_writer:
emoji_writer.write(json.dumps(__SCREAMING_SNAKE_CASE ) )
def _lowercase ( self : Any , **lowerCAmelCase_ : Any ) -> Any:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def _lowercase ( self : Optional[int] , lowerCAmelCase_ : Any ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = '''こんにちは、世界。 \nこんばんは、㔺界。😀'''
SCREAMING_SNAKE_CASE_ = '''こんにちは、世界。 \nこんばんは、世界。😀'''
return input_text, output_text
def _lowercase ( self : Any , lowerCAmelCase_ : Optional[Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = self.get_input_output_texts(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = tokenizer.decode(__SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=__SCREAMING_SNAKE_CASE )
return text, ids
def _lowercase ( self : Tuple ) -> Dict:
"""simple docstring"""
pass # TODO add if relevant
def _lowercase ( self : Dict ) -> str:
"""simple docstring"""
pass # TODO add if relevant
def _lowercase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
pass # TODO add if relevant
def _lowercase ( self : Tuple ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
# Testing tokenization
SCREAMING_SNAKE_CASE_ = '''こんにちは、世界。 こんばんは、㔺界。'''
SCREAMING_SNAKE_CASE_ = ['''こん''', '''にちは''', '''、''', '''世界''', '''。''', '''<SP>''', '''こん''', '''ばんは''', '''、''', '''㔺界''', '''。''']
SCREAMING_SNAKE_CASE_ = tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Testing conversion to ids without special tokens
SCREAMING_SNAKE_CASE_ = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
SCREAMING_SNAKE_CASE_ = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Testing conversion to ids with special tokens
SCREAMING_SNAKE_CASE_ = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE_ = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
SCREAMING_SNAKE_CASE_ = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _lowercase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
# Testing tokenization
SCREAMING_SNAKE_CASE_ = '''こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'''
SCREAMING_SNAKE_CASE_ = '''こんにちは、、、、世界。こんばんは、、、、世界。'''
SCREAMING_SNAKE_CASE_ = tokenizer.encode(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = tokenizer.decode(__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@slow
def _lowercase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
# Testing tokenization
SCREAMING_SNAKE_CASE_ = '''こんにちは、世界。'''
SCREAMING_SNAKE_CASE_ = '''こんばんは、㔺界。😀'''
SCREAMING_SNAKE_CASE_ = '''こんにちは、世界。こんばんは、世界。😀'''
SCREAMING_SNAKE_CASE_ = tokenizer.encode(prefix_text + input_text )
SCREAMING_SNAKE_CASE_ = tokenizer.encode('''''' , prefix_text=prefix_text + input_text )
SCREAMING_SNAKE_CASE_ = tokenizer.encode(__SCREAMING_SNAKE_CASE , prefix_text=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = tokenizer.decode(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = tokenizer.decode(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = tokenizer.decode(__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@slow
def _lowercase ( self : int ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
# Testing tokenization
SCREAMING_SNAKE_CASE_ = '''こんにちは、世界。'''
SCREAMING_SNAKE_CASE_ = '''こんばんは、㔺界。😀'''
SCREAMING_SNAKE_CASE_ = len(tokenizer.encode(__SCREAMING_SNAKE_CASE ) ) - 2
SCREAMING_SNAKE_CASE_ = len(tokenizer.encode(__SCREAMING_SNAKE_CASE ) ) - 2
SCREAMING_SNAKE_CASE_ = [1] + [0] * (len_prefix + len_text + 1)
SCREAMING_SNAKE_CASE_ = [1] * (len_prefix + len_text + 1) + [0]
SCREAMING_SNAKE_CASE_ = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
SCREAMING_SNAKE_CASE_ = tokenizer(prefix_text + input_text ).token_type_ids
SCREAMING_SNAKE_CASE_ = tokenizer('''''' , prefix_text=prefix_text + input_text ).token_type_ids
SCREAMING_SNAKE_CASE_ = tokenizer(__SCREAMING_SNAKE_CASE , prefix_text=__SCREAMING_SNAKE_CASE ).token_type_ids
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@slow
def _lowercase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
SCREAMING_SNAKE_CASE_ = tokenizer.encode('''あンいワ''' )
SCREAMING_SNAKE_CASE_ = tokenizer.encode('''''' , prefix_text='''あンいワ''' )
SCREAMING_SNAKE_CASE_ = tokenizer.encode('''いワ''' , prefix_text='''あン''' )
self.assertEqual(tokenizer.decode(__SCREAMING_SNAKE_CASE ) , tokenizer.decode(__SCREAMING_SNAKE_CASE ) )
self.assertEqual(tokenizer.decode(__SCREAMING_SNAKE_CASE ) , tokenizer.decode(__SCREAMING_SNAKE_CASE ) )
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def _lowercase ( self : Dict ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
SCREAMING_SNAKE_CASE_ = [['''武田信玄''', '''は、'''], ['''織田信長''', '''の配下の、''']]
SCREAMING_SNAKE_CASE_ = tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = tokenizer.batch_encode_plus(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE )
# fmt: off
SCREAMING_SNAKE_CASE_ = [[35_993, 8_640, 25_948, 35_998, 30_647, 35_675, 35_999, 35_999], [35_993, 10_382, 9_868, 35_998, 30_646, 9_459, 30_646, 35_675]]
SCREAMING_SNAKE_CASE_ = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
SCREAMING_SNAKE_CASE_ = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , __SCREAMING_SNAKE_CASE )
self.assertListEqual(x_token.token_type_ids , __SCREAMING_SNAKE_CASE )
self.assertListEqual(x_token.attention_mask , __SCREAMING_SNAKE_CASE )
self.assertListEqual(x_token_a.input_ids , __SCREAMING_SNAKE_CASE )
self.assertListEqual(x_token_a.token_type_ids , __SCREAMING_SNAKE_CASE )
self.assertListEqual(x_token_a.attention_mask , __SCREAMING_SNAKE_CASE )
def _lowercase ( self : int ) -> List[str]:
"""simple docstring"""
pass
def _lowercase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
pass
| 393 |
'''simple docstring'''
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def _UpperCamelCase (_lowerCamelCase : Tuple )-> List[str]:
'''simple docstring'''
__snake_case = int(_lowerCamelCase )
__snake_case , __snake_case , __snake_case = t // 36_00, (t // 60) % 60, t % 60
return f'''{h}:{m:02d}:{s:02d}''' if h != 0 else f'''{m:02d}:{s:02d}'''
def _UpperCamelCase (_lowerCamelCase : Any , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str , _lowerCamelCase : int , _lowerCamelCase : List[Any]=3_00 )-> int:
'''simple docstring'''
return f'''
<div>
{prefix}
<progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>
{label}
</div>
'''
def _UpperCamelCase (_lowerCamelCase : int )-> List[Any]:
'''simple docstring'''
__snake_case = '''<table border="1" class="dataframe">\n'''
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += f''' <th>{i}</th>\n'''
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
__snake_case = f'''{elt:.6f}''' if isinstance(_lowerCamelCase , _lowerCamelCase ) else str(_lowerCamelCase )
html_code += f''' <td>{elt}</td>\n'''
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class lowerCAmelCase :
__lowercase : str = 5
__lowercase : Optional[Any] = 0.2
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 300 , ) -> List[Any]:
'''simple docstring'''
__snake_case = total
__snake_case = '''''' if prefix is None else prefix
__snake_case = leave
__snake_case = parent
__snake_case = width
__snake_case = None
__snake_case = None
__snake_case = None
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None ) -> Any:
'''simple docstring'''
__snake_case = value
if comment is not None:
__snake_case = comment
if self.last_value is None:
__snake_case = __snake_case = time.time()
__snake_case = __snake_case = value
__snake_case = __snake_case = None
__snake_case = self.warmup
__snake_case = 1
self.update_bar(__SCREAMING_SNAKE_CASE )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
if self.first_calls > 0:
self.first_calls -= 1
__snake_case = time.time()
__snake_case = current_time - self.start_time
# We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
__snake_case = self.elapsed_time / (value - self.start_value)
else:
__snake_case = None
if value >= self.total:
__snake_case = self.total
__snake_case = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
__snake_case = self.average_time_per_item * (self.total - value)
self.update_bar(__SCREAMING_SNAKE_CASE )
__snake_case = value
__snake_case = current_time
if self.average_time_per_item is None:
__snake_case = 1
else:
__snake_case = max(int(self.update_every / self.average_time_per_item ) , 1 )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) -> List[str]:
'''simple docstring'''
__snake_case = ''' ''' * (len(str(self.total ) ) - len(str(__SCREAMING_SNAKE_CASE ) )) + str(__SCREAMING_SNAKE_CASE )
if self.elapsed_time is None:
__snake_case = F'''[{spaced_value}/{self.total} : < :'''
elif self.predicted_remaining is None:
__snake_case = F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )}'''
else:
__snake_case = (
F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <'''
F''' {format_time(self.predicted_remaining )}'''
)
self.label += F''', {1/self.average_time_per_item:.2f} it/s'''
self.label += "]" if self.comment is None or len(self.comment ) == 0 else F''', {self.comment}]'''
self.display()
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
__snake_case = disp.display(disp.HTML(self.html_code ) , display_id=__SCREAMING_SNAKE_CASE )
else:
self.output.update(disp.HTML(self.html_code ) )
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
if self.parent is None and self.output is not None:
self.output.update(disp.HTML('''''' ) )
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) -> Any:
'''simple docstring'''
super().__init__(__SCREAMING_SNAKE_CASE )
__snake_case = None if column_names is None else [column_names]
__snake_case = None
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
__snake_case = disp.display(disp.HTML(self.html_code ) , display_id=__SCREAMING_SNAKE_CASE )
else:
self.output.update(disp.HTML(self.html_code ) )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
if self.inner_table is None:
__snake_case = [list(values.keys() ), list(values.values() )]
else:
__snake_case = self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(__SCREAMING_SNAKE_CASE )
__snake_case = columns
self.inner_table.append([values[c] for c in columns] )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=300 ) -> List[str]:
'''simple docstring'''
__snake_case = NotebookProgressBar(__SCREAMING_SNAKE_CASE , prefix=__SCREAMING_SNAKE_CASE , parent=self , width=__SCREAMING_SNAKE_CASE )
return self.child_bar
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = None
self.display()
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self ) -> str:
'''simple docstring'''
__snake_case = None
__snake_case = None
__snake_case = False
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
__snake_case = '''Epoch''' if args.evaluation_strategy == IntervalStrategy.EPOCH else '''Step'''
__snake_case = 0
__snake_case = 0
__snake_case = [self.first_column] + ['''Training Loss''']
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append('''Validation Loss''' )
__snake_case = NotebookTrainingTracker(state.max_steps , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
__snake_case = int(state.epoch ) if int(state.epoch ) == state.epoch else F'''{state.epoch:.2f}'''
self.training_tracker.update(
state.global_step + 1 , comment=F'''Epoch {epoch}/{state.num_train_epochs}''' , force_update=self._force_next_update , )
__snake_case = False
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
if not has_length(__SCREAMING_SNAKE_CASE ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
__snake_case = self.training_tracker.add_child(len(__SCREAMING_SNAKE_CASE ) )
else:
__snake_case = NotebookProgressBar(len(__SCREAMING_SNAKE_CASE ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
if self.prediction_bar is not None:
self.prediction_bar.close()
__snake_case = None
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
__snake_case = {'''Training Loss''': logs['''loss''']}
# First column is necessarily "Step" since we're not in epoch eval strategy
__snake_case = state.global_step
self.training_tracker.write_line(__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
if self.training_tracker is not None:
__snake_case = {'''Training Loss''': '''No log''', '''Validation Loss''': '''No log'''}
for log in reversed(state.log_history ):
if "loss" in log:
__snake_case = log['''loss''']
break
if self.first_column == "Epoch":
__snake_case = int(state.epoch )
else:
__snake_case = state.global_step
__snake_case = '''eval'''
for k in metrics:
if k.endswith('''_loss''' ):
__snake_case = re.sub(r'''\_loss$''' , '''''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop('''total_flos''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop('''epoch''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop(F'''{metric_key_prefix}_runtime''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop(F'''{metric_key_prefix}_samples_per_second''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop(F'''{metric_key_prefix}_steps_per_second''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop(F'''{metric_key_prefix}_jit_compilation_time''' , __SCREAMING_SNAKE_CASE )
for k, v in metrics.items():
if k == F'''{metric_key_prefix}_loss''':
__snake_case = v
else:
__snake_case = k.split('''_''' )
__snake_case = ''' '''.join([part.capitalize() for part in splits[1:]] )
__snake_case = v
self.training_tracker.write_line(__SCREAMING_SNAKE_CASE )
self.training_tracker.remove_child()
__snake_case = None
# Evaluation takes a long time so we should force the next update.
__snake_case = True
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
self.training_tracker.update(
state.global_step , comment=F'''Epoch {int(state.epoch )}/{state.num_train_epochs}''' , force_update=__SCREAMING_SNAKE_CASE )
__snake_case = None
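# Attaching the tracker to a Trainer (a sketch; NotebookProgressCallback is the
# conventional name of the last class above, and transformers installs it
# automatically when it detects a notebook environment):
#
#   from transformers import Trainer
#   trainer = Trainer(model=model, args=training_args, callbacks=[NotebookProgressCallback()])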
| 24 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class __a ( __lowerCAmelCase ):
def lowerCamelCase_ ( self ):
'''simple docstring'''
lowerCAmelCase_ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''hidden_sizes''' ) )
self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''num_attention_heads''' ) )
self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''num_encoder_blocks''' ) )
class __a :
def __init__( self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=64 , UpperCAmelCase=3 , UpperCAmelCase=4 , UpperCAmelCase=[2, 2, 2, 2] , UpperCAmelCase=[8, 4, 2, 1] , UpperCAmelCase=[16, 32, 64, 128] , UpperCAmelCase=[1, 4, 8, 16] , UpperCAmelCase=[1, 2, 4, 8] , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.0_2 , UpperCAmelCase=3 , UpperCAmelCase=None , ):
'''simple docstring'''
lowerCAmelCase_ = parent
lowerCAmelCase_ = batch_size
lowerCAmelCase_ = image_size
lowerCAmelCase_ = num_channels
lowerCAmelCase_ = num_encoder_blocks
lowerCAmelCase_ = sr_ratios
lowerCAmelCase_ = depths
lowerCAmelCase_ = hidden_sizes
lowerCAmelCase_ = downsampling_rates
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = is_training
lowerCAmelCase_ = use_labels
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = num_labels
lowerCAmelCase_ = scope
def lowerCamelCase_ ( self ):
'''simple docstring'''
lowerCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase_ = None
if self.use_labels:
lowerCAmelCase_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowerCAmelCase_ = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self ):
'''simple docstring'''
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def lowerCamelCase_ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase_ = SegformerModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase_ = model(__SCREAMING_SNAKE_CASE )
lowerCAmelCase_ = lowerCAmelCase_ = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def lowerCamelCase_ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase_ = self.num_labels
lowerCAmelCase_ = SegformerForSemanticSegmentation(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase_ = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
lowerCAmelCase_ = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
def lowerCamelCase_ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase_ = 1
lowerCAmelCase_ = SegformerForSemanticSegmentation(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase_ = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(__SCREAMING_SNAKE_CASE )
lowerCAmelCase_ = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertGreater(result.loss , 0.0 )
def lowerCamelCase_ ( self ):
'''simple docstring'''
lowerCAmelCase_ = self.prepare_config_and_inputs()
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = config_and_inputs
lowerCAmelCase_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __a ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
lowerCamelCase : List[str] =(
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase : Union[str, Any] =(
{
'''feature-extraction''': SegformerModel,
'''image-classification''': SegformerForImageClassification,
'''image-segmentation''': SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCamelCase : Tuple =True
lowerCamelCase : Union[str, Any] =False
lowerCamelCase : Any =False
lowerCamelCase : int =False
def lowerCamelCase_ ( self ):
'''simple docstring'''
lowerCAmelCase_ = SegformerModelTester(self )
lowerCAmelCase_ = SegformerConfigTester(self , config_class=__SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self ):
'''simple docstring'''
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( self ):
'''simple docstring'''
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*__SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( self ):
'''simple docstring'''
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*__SCREAMING_SNAKE_CASE )
@unittest.skip('''SegFormer does not use inputs_embeds''' )
def lowerCamelCase_ ( self ):
'''simple docstring'''
pass
@unittest.skip('''SegFormer does not have get_input_embeddings method and get_output_embeddings methods''' )
def lowerCamelCase_ ( self ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self ):
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ = model_class(__SCREAMING_SNAKE_CASE )
lowerCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_ = [*signature.parameters.keys()]
lowerCAmelCase_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( self ):
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ = True
for model_class in self.all_model_classes:
lowerCAmelCase_ = True
lowerCAmelCase_ = False
lowerCAmelCase_ = True
lowerCAmelCase_ = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
lowerCAmelCase_ = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
lowerCAmelCase_ = outputs.attentions
lowerCAmelCase_ = sum(self.model_tester.depths )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCAmelCase_ = True
lowerCAmelCase_ = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
lowerCAmelCase_ = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
lowerCAmelCase_ = outputs.attentions
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
# verify the first attentions (first block, first layer)
lowerCAmelCase_ = (self.model_tester.image_size // 4) ** 2
lowerCAmelCase_ = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
lowerCAmelCase_ = (self.model_tester.image_size // 32) ** 2
lowerCAmelCase_ = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
lowerCAmelCase_ = len(__SCREAMING_SNAKE_CASE )
# Check attention is always last and order is fine
lowerCAmelCase_ = True
lowerCAmelCase_ = True
lowerCAmelCase_ = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
lowerCAmelCase_ = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
self.assertEqual(out_len + 1 , len(__SCREAMING_SNAKE_CASE ) )
lowerCAmelCase_ = outputs.attentions
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
# verify the first attentions (first block, first layer)
lowerCAmelCase_ = (self.model_tester.image_size // 4) ** 2
lowerCAmelCase_ = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def lowerCamelCase_ ( self ):
'''simple docstring'''
def check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowerCAmelCase_ = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
lowerCAmelCase_ = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
lowerCAmelCase_ = outputs.hidden_states
lowerCAmelCase_ = self.model_tester.num_encoder_blocks
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase_ = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( self ):
'''simple docstring'''
if not self.model_tester.is_training:
return
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ = True
for model_class in self.all_model_classes:
if model_class in get_values(__SCREAMING_SNAKE_CASE ):
continue
lowerCAmelCase_ = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.train()
lowerCAmelCase_ = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
lowerCAmelCase_ = model(**__SCREAMING_SNAKE_CASE ).loss
loss.backward()
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowerCamelCase_ ( self ):
'''simple docstring'''
pass
@slow
def lowerCamelCase_ ( self ):
'''simple docstring'''
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ = SegformerModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
class __a ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self ):
'''simple docstring'''
lowerCAmelCase_ = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=__SCREAMING_SNAKE_CASE , align=__SCREAMING_SNAKE_CASE , do_random_crop=__SCREAMING_SNAKE_CASE )
lowerCAmelCase_ = SegformerForSemanticSegmentation.from_pretrained('''nvidia/segformer-b0-finetuned-ade-512-512''' ).to(
__SCREAMING_SNAKE_CASE )
lowerCAmelCase_ = prepare_img()
lowerCAmelCase_ = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
lowerCAmelCase_ = encoded_inputs.pixel_values.to(__SCREAMING_SNAKE_CASE )
with torch.no_grad():
lowerCAmelCase_ = model(__SCREAMING_SNAKE_CASE )
lowerCAmelCase_ = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE )
lowerCAmelCase_ = torch.tensor(
[
[[-4.6_3_1_0, -5.5_2_3_2, -6.2_3_5_6], [-5.1_9_2_1, -6.1_4_4_4, -6.5_9_9_6], [-5.4_4_2_4, -6.2_7_9_0, -6.7_5_7_4]],
[[-1_2.1_3_9_1, -1_3.3_1_2_2, -1_3.9_5_5_4], [-1_2.8_7_3_2, -1_3.9_3_5_2, -1_4.3_5_6_3], [-1_2.9_4_3_8, -1_3.8_2_2_6, -1_4.2_5_1_3]],
[[-1_2.5_1_3_4, -1_3.4_6_8_6, -1_4.4_9_1_5], [-1_2.8_6_6_9, -1_4.4_3_4_3, -1_4.7_7_5_8], [-1_3.2_5_2_3, -1_4.5_8_1_9, -1_5.0_6_9_4]],
] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
@slow
def lowerCamelCase_ ( self ):
'''simple docstring'''
lowerCAmelCase_ = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=__SCREAMING_SNAKE_CASE , align=__SCREAMING_SNAKE_CASE , do_random_crop=__SCREAMING_SNAKE_CASE )
lowerCAmelCase_ = SegformerForSemanticSegmentation.from_pretrained(
'''nvidia/segformer-b1-finetuned-cityscapes-1024-1024''' ).to(__SCREAMING_SNAKE_CASE )
lowerCAmelCase_ = prepare_img()
lowerCAmelCase_ = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
lowerCAmelCase_ = encoded_inputs.pixel_values.to(__SCREAMING_SNAKE_CASE )
with torch.no_grad():
lowerCAmelCase_ = model(__SCREAMING_SNAKE_CASE )
lowerCAmelCase_ = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE )
lowerCAmelCase_ = torch.tensor(
[
[[-1_3.5_7_4_8, -1_3.9_1_1_1, -1_2.6_5_0_0], [-1_4.3_5_0_0, -1_5.3_6_8_3, -1_4.2_3_2_8], [-1_4.7_5_3_2, -1_6.0_4_2_4, -1_5.6_0_8_7]],
[[-1_7.1_6_5_1, -1_5.8_7_2_5, -1_2.9_6_5_3], [-1_7.2_5_8_0, -1_7.3_7_1_8, -1_4.8_2_2_3], [-1_6.6_0_5_8, -1_6.8_7_8_3, -1_6.7_4_5_2]],
[[-3.6_4_5_6, -3.0_2_0_9, -1.4_2_0_3], [-3.0_7_9_7, -3.1_9_5_9, -2.0_0_0_0], [-1.8_7_5_7, -1.9_2_1_7, -1.6_9_9_7]],
] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-1 ) )
@slow
def lowerCamelCase_ ( self ):
'''simple docstring'''
lowerCAmelCase_ = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=__SCREAMING_SNAKE_CASE , align=__SCREAMING_SNAKE_CASE , do_random_crop=__SCREAMING_SNAKE_CASE )
lowerCAmelCase_ = SegformerForSemanticSegmentation.from_pretrained('''nvidia/segformer-b0-finetuned-ade-512-512''' ).to(
__SCREAMING_SNAKE_CASE )
lowerCAmelCase_ = prepare_img()
lowerCAmelCase_ = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
lowerCAmelCase_ = encoded_inputs.pixel_values.to(__SCREAMING_SNAKE_CASE )
with torch.no_grad():
lowerCAmelCase_ = model(__SCREAMING_SNAKE_CASE )
lowerCAmelCase_ = outputs.logits.detach().cpu()
lowerCAmelCase_ = image_processor.post_process_semantic_segmentation(outputs=__SCREAMING_SNAKE_CASE , target_sizes=[(500, 300)] )
lowerCAmelCase_ = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , __SCREAMING_SNAKE_CASE )
lowerCAmelCase_ = image_processor.post_process_semantic_segmentation(outputs=__SCREAMING_SNAKE_CASE )
lowerCAmelCase_ = torch.Size((128, 128) )
self.assertEqual(segmentation[0].shape , __SCREAMING_SNAKE_CASE ) | 552 |
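# A hedged, standalone sketch (names and class count are illustrative, not taken from
# the tests above) of what semantic-segmentation post-processing boils down to:
# upsample the logits to the target size, then argmax over the class axis.
import torch

def logits_to_segmentation(logits: torch.Tensor, target_size) -> torch.Tensor:
    upsampled = torch.nn.functional.interpolate(
        logits, size=target_size, mode="bilinear", align_corners=False
    )
    return upsampled.argmax(dim=1)[0]  # (H, W) label map for the first image

logits = torch.randn(1, 150, 128, 128)  # ADE20K-style head: 150 classes
seg = logits_to_segmentation(logits, (500, 300))
assert seg.shape == (500, 300)  # matches the (500, 300) target size in the test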
'''simple docstring'''
def least_divisible_repunit (divisor : int )-> int:
    '''simple docstring'''
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index
def solution (limit : int = 1_00_00_00 )-> int:
    '''simple docstring'''
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor ) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(F"""{solution() = }""")
| 24 | 0 |
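# Independent sanity check (our own helper name) of the repunit idea above: A(n) is
# the length of the smallest repunit 1, 11, 111, ... divisible by n, and it exists
# iff n shares no factor with 10. Only the running remainder needs to be tracked.
def repunit_length(n: int) -> int:
    if n % 2 == 0 or n % 5 == 0:
        return 0
    remainder, length = 1 % n, 1
    while remainder:
        remainder = (remainder * 10 + 1) % n
        length += 1
    return length

assert repunit_length(7) == 6    # 111111 == 7 * 15873
assert repunit_length(41) == 5   # 11111 == 41 * 271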
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
UpperCAmelCase : str = (
'''https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'''
)
UpperCAmelCase : str = logging.get_logger(__name__) # pylint: disable=invalid-name
def get_diffusers_versions ( ):
    '''simple docstring'''
    url = """https://pypi.org/pypi/diffusers/json"""
    releases = json.loads(request.urlopen(url ).read() )["""releases"""].keys()
    return sorted(releases , key=lambda x : version.Version(x ) )
def init_hf_modules ( ):
    '''simple docstring'''
    if HF_MODULES_CACHE in sys.path:
        return
    sys.path.append(HF_MODULES_CACHE )
    os.makedirs(HF_MODULES_CACHE , exist_ok=True )
    init_path = Path(HF_MODULES_CACHE ) / """__init__.py"""
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module ( name : Union[str, os.PathLike] ):
    '''simple docstring'''
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE ) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent )
    os.makedirs(dynamic_module_path , exist_ok=True )
    init_path = dynamic_module_path / """__init__.py"""
    if not init_path.exists():
        init_path.touch()
def get_relative_imports ( module_file : Dict ):
    '''simple docstring'''
    with open(module_file , """r""" , encoding="""utf-8""" ) as f:
        content = f.read()
    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"""^\s*import\s+\.(\S+)\s*$""" , content , flags=re.MULTILINE )
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"""^\s*from\s+\.(\S+)\s+import""" , content , flags=re.MULTILINE )
    # Unique-ify
    return list(set(relative_imports ) )
def get_relative_import_files ( module_file : Dict ):
    '''simple docstring'''
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []
    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f ) )
        module_path = Path(module_file ).parent
        new_import_files = [str(module_path / m ) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f'{f}.py' for f in new_import_files]
        no_change = len(new_import_files ) == 0
        all_relative_imports.extend(files_to_check )
    return all_relative_imports
def check_imports ( filename : Optional[Any] ):
    '''simple docstring'''
    with open(filename , """r""" , encoding="""utf-8""" ) as f:
        content = f.read()
    # Imports of the form `import xxx`
    imports = re.findall(r"""^\s*import\s+(\S+)\s*$""" , content , flags=re.MULTILINE )
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"""^\s*from\s+(\S+)\s+import""" , content , flags=re.MULTILINE )
    # Only keep the top-level module
    imports = [imp.split(""".""" )[0] for imp in imports if not imp.startswith(""".""" )]
    # Unique-ify and test we got them all
    imports = list(set(imports ) )
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp )
        except ImportError:
            missing_packages.append(imp )
    if len(missing_packages ) > 0:
        raise ImportError(
            """This modeling file requires the following packages that were not found in your environment: """
            f'{", ".join(missing_packages )}. Run `pip install {" ".join(missing_packages )}`' )
    return get_relative_imports(filename )
def get_class_in_module ( class_name : str , module_path : Dict ):
    '''simple docstring'''
    module_path = module_path.replace(os.path.sep , """.""" )
    module = importlib.import_module(module_path )
    if class_name is None:
        return find_pipeline_class(module )
    return getattr(module , class_name )
def find_pipeline_class ( loaded_module : List[str] ):
    '''simple docstring'''
    from ..pipelines import DiffusionPipeline
    cls_members = dict(inspect.getmembers(loaded_module , inspect.isclass ) )
    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls , DiffusionPipeline )
            and cls.__module__.split(""".""" )[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f'Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'
                    f' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'
                    f' {loaded_module}.' )
            pipeline_class = cls
    return pipeline_class
def get_cached_module_file ( pretrained_model_name_or_path : Union[str, os.PathLike] , module_file : str , cache_dir : Optional[Union[str, os.PathLike]] = None , force_download : bool = False , resume_download : bool = False , proxies : Optional[Dict[str, str]] = None , use_auth_token : Optional[Union[bool, str]] = None , revision : Optional[str] = None , local_files_only : bool = False , ):
'''simple docstring'''
lowerCamelCase = str(_lowerCamelCase )
lowerCamelCase = os.path.join(_lowerCamelCase , _lowerCamelCase )
if os.path.isfile(_lowerCamelCase ):
lowerCamelCase = module_file_or_url
lowerCamelCase = """local"""
elif pretrained_model_name_or_path.count("""/""" ) == 0:
lowerCamelCase = get_diffusers_versions()
# cut ".dev0"
lowerCamelCase = """v""" + """.""".join(__version__.split(""".""" )[:3] )
# retrieve github version that matches
if revision is None:
lowerCamelCase = latest_version if latest_version[1:] in available_versions else """main"""
logger.info(f'Defaulting to latest_version: {revision}.' )
elif revision in available_versions:
lowerCamelCase = f'v{revision}'
elif revision == "main":
lowerCamelCase = revision
else:
raise ValueError(
f'`custom_revision`: {revision} does not exist. Please make sure to choose one of'
f' {", ".join(available_versions + ["main"] )}.' )
# community pipeline on GitHub
lowerCamelCase = COMMUNITY_PIPELINES_URL.format(revision=_lowerCamelCase , pipeline=_lowerCamelCase )
try:
lowerCamelCase = cached_download(
_lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , proxies=_lowerCamelCase , resume_download=_lowerCamelCase , local_files_only=_lowerCamelCase , use_auth_token=_lowerCamelCase , )
lowerCamelCase = """git"""
lowerCamelCase = pretrained_model_name_or_path + """.py"""
except EnvironmentError:
logger.error(f'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
raise
else:
try:
# Load from URL or cache if already cached
lowerCamelCase = hf_hub_download(
_lowerCamelCase , _lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , proxies=_lowerCamelCase , resume_download=_lowerCamelCase , local_files_only=_lowerCamelCase , use_auth_token=_lowerCamelCase , )
lowerCamelCase = os.path.join("""local""" , """--""".join(pretrained_model_name_or_path.split("""/""" ) ) )
except EnvironmentError:
logger.error(f'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
raise
# Check we have all the requirements in our environment
lowerCamelCase = check_imports(_lowerCamelCase )
# Now we move the module inside our cached dynamic modules.
lowerCamelCase = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(_lowerCamelCase )
lowerCamelCase = Path(_lowerCamelCase ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(_lowerCamelCase , submodule_path / module_file )
for module_needed in modules_needed:
lowerCamelCase = f'{module_needed}.py'
shutil.copy(os.path.join(_lowerCamelCase , _lowerCamelCase ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(_lowerCamelCase , _lowerCamelCase ):
lowerCamelCase = use_auth_token
elif use_auth_token is True:
lowerCamelCase = HfFolder.get_token()
else:
lowerCamelCase = None
lowerCamelCase = model_info(_lowerCamelCase , revision=_lowerCamelCase , token=_lowerCamelCase ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
lowerCamelCase = submodule_path / commit_hash
lowerCamelCase = full_submodule + os.path.sep + commit_hash
create_dynamic_module(_lowerCamelCase )
if not (submodule_path / module_file).exists():
shutil.copy(_lowerCamelCase , submodule_path / module_file )
    # Make sure we also have every file with relative imports
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
_lowerCamelCase , f'{module_needed}.py' , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , resume_download=_lowerCamelCase , proxies=_lowerCamelCase , use_auth_token=_lowerCamelCase , revision=_lowerCamelCase , local_files_only=_lowerCamelCase , )
return os.path.join(_lowerCamelCase , _lowerCamelCase )
def get_class_from_dynamic_module ( pretrained_model_name_or_path : Union[str, os.PathLike] , module_file : str , class_name : Optional[str] = None , cache_dir : Optional[Union[str, os.PathLike]] = None , force_download : bool = False , resume_download : bool = False , proxies : Optional[Dict[str, str]] = None , use_auth_token : Optional[Union[bool, str]] = None , revision : Optional[str] = None , local_files_only : bool = False , **kwargs : Optional[Any] , ):
'''simple docstring'''
lowerCamelCase = get_cached_module_file(
_lowerCamelCase , _lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , resume_download=_lowerCamelCase , proxies=_lowerCamelCase , use_auth_token=_lowerCamelCase , revision=_lowerCamelCase , local_files_only=_lowerCamelCase , )
return get_class_in_module(_lowerCamelCase , final_module.replace(""".py""" , """""" ) )
| 457 |
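# A small self-contained illustration (our own function name) of the relative-import
# scan used above; the demo string only exercises the regexes — the second line is
# not valid Python import syntax, it just matches the `import .xxx` pattern.
import re

def find_relative_imports(source: str) -> list:
    hits = re.findall(r"^\s*import\s+\.(\S+)\s*$", source, flags=re.MULTILINE)
    hits += re.findall(r"^\s*from\s+\.(\S+)\s+import", source, flags=re.MULTILINE)
    return sorted(set(hits))

demo = "from .scheduling_utils import SchedulerMixin\nimport .pipeline_utils\n"
assert find_relative_imports(demo) == ["pipeline_utils", "scheduling_utils"]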
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order (df : Union[str, Any] , partition_order : List[Any] )-> Optional[Any]:
    '''simple docstring'''
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f'''SPARK_PARTITION_ID() = {part_id}''' ).collect()
        for row_idx, row in enumerate(partition ):
            expected_row_ids_and_row_dicts.append((f'''{part_id}_{row_idx}''', row.asDict()) )
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Any:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(1_00 ).repartition(1 )
__snake_case = Spark(_lowerCamelCase )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Tuple:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(10 ).repartition(2 )
__snake_case = [1, 0]
__snake_case = _generate_iterable_examples(_lowerCamelCase , _lowerCamelCase ) # Reverse the partitions.
__snake_case = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , _lowerCamelCase )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
__snake_case , __snake_case = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> int:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(10 ).repartition(1 )
__snake_case = SparkExamplesIterable(_lowerCamelCase )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
assert row_id == f'''0_{i}'''
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Union[str, Any]:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('''numpy.random.Generator''' ) as generator_mock:
__snake_case = lambda _lowerCamelCase : x.reverse()
__snake_case = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , [2, 1, 0] )
__snake_case = SparkExamplesIterable(_lowerCamelCase ).shuffle_data_sources(_lowerCamelCase )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
__snake_case , __snake_case = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Tuple:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
__snake_case = SparkExamplesIterable(_lowerCamelCase ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
__snake_case = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , [0, 2] )
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
__snake_case , __snake_case = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
__snake_case = SparkExamplesIterable(_lowerCamelCase ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
__snake_case = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , [1, 3] )
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
__snake_case , __snake_case = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Optional[int]:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(1_00 ).repartition(1 )
__snake_case = Spark(_lowerCamelCase )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 1_00
| 24 | 0 |
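# Illustrative sketch (not from the source) of the partition split the sharding tests
# above expect: worker w out of N takes every N-th partition starting at w.
def shard_partitions(num_partitions: int, worker_id: int, num_workers: int) -> list:
    return [p for p in range(num_partitions) if p % num_workers == worker_id]

assert shard_partitions(4, 0, 2) == [0, 2]  # the "Partitions 0 and 2" case
assert shard_partitions(4, 1, 2) == [1, 3]  # the "Partitions 1 and 3" case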
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
UpperCamelCase_ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 331 |
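# A minimal, hedged sketch (our own class, much simpler than _LazyModule) of the
# deferred-import pattern above: the real import runs only on first attribute access.
import importlib

class LazyLoader:
    def __init__(self, module_name: str):
        self._module_name = module_name
        self._module = None

    def __getattr__(self, attr: str):
        if self._module is None:  # import lazily, exactly once
            self._module = importlib.import_module(self._module_name)
        return getattr(self._module, attr)

json_lazy = LazyLoader("json")
assert json_lazy.loads("[1, 2]") == [1, 2]  # the import happens here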
'''simple docstring'''
def sum_of_series (first_term : int , common_diff : int , num_of_terms : int )-> float:
    '''simple docstring'''
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for the sum of an arithmetic series
    return total
def main ()-> None:
    '''simple docstring'''
    print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 24 | 0 |
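# Quick independent check of the closed form used above,
# S_n = n / 2 * (2 * a + (n - 1) * d), against a brute-force sum.
def arith_sum(a: int, d: int, n: int) -> float:
    return (n / 2) * (2 * a + (n - 1) * d)

assert arith_sum(1, 1, 10) == sum(range(1, 11)) == 55
assert arith_sum(3, 4, 5) == sum(3 + 4 * k for k in range(5)) == 55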
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCamelCase__ = logging.get_logger(__name__)
class lowerCamelCase_ ( __lowerCAmelCase ):
lowerCAmelCase__ = ['''pixel_values''']
    def __init__( self : Optional[int] , do_resize : int = True , size : str = None , resample : str = PILImageResampling.BICUBIC , do_center_crop : str = True , do_rescale : List[str] = True , rescale_factor : Tuple = 1 / 255 , crop_size : str = None , do_normalize : Optional[int] = True , image_mean : Tuple = None , image_std : Tuple = None , **kwargs : Dict , ):
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE )
UpperCAmelCase__ : str = size if size is not None else {'''height''': 224, '''width''': 224}
UpperCAmelCase__ : Optional[Any] = get_size_dict(__SCREAMING_SNAKE_CASE )
UpperCAmelCase__ : int = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
UpperCAmelCase__ : Optional[Any] = get_size_dict(__SCREAMING_SNAKE_CASE , default_to_square=__SCREAMING_SNAKE_CASE , param_name='''crop_size''' )
UpperCAmelCase__ : Optional[Any] = do_resize
UpperCAmelCase__ : Union[str, Any] = do_rescale
UpperCAmelCase__ : List[Any] = do_normalize
UpperCAmelCase__ : str = do_center_crop
UpperCAmelCase__ : Dict = crop_size
UpperCAmelCase__ : Optional[Any] = size
UpperCAmelCase__ : Tuple = resample
UpperCAmelCase__ : int = rescale_factor
UpperCAmelCase__ : str = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
UpperCAmelCase__ : List[str] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def lowercase_ ( self : int , image : Union[str, Any] , size : Any , resample : List[str] = PILImageResampling.BILINEAR , data_format : str = None , **kwargs : List[Any] , ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = get_size_dict(__SCREAMING_SNAKE_CASE )
if "shortest_edge" in size:
UpperCAmelCase__ : str = get_resize_output_image_size(__SCREAMING_SNAKE_CASE , size=size['''shortest_edge'''] , default_to_square=__SCREAMING_SNAKE_CASE )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
UpperCAmelCase__ : List[Any] = (size['''height'''], size['''width'''])
else:
raise ValueError(f"""Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}""" )
return resize(__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE , resample=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
    def lowercase_ ( self : List[str] , image : Optional[Any] , size : List[Any] , data_format : List[Any] = None , **kwargs : Optional[Any] , ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = get_size_dict(__SCREAMING_SNAKE_CASE )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(__SCREAMING_SNAKE_CASE , size=(size['''height'''], size['''width''']) , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
    def lowercase_ ( self : Union[str, Any] , image : int , scale : str , data_format : List[Any] = None , **kwargs : List[Any] ):
'''simple docstring'''
return rescale(__SCREAMING_SNAKE_CASE , scale=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
    def lowercase_ ( self : Union[str, Any] , image : int , mean : Union[str, Any] , std : Any , data_format : List[str] = None , **kwargs : int , ):
'''simple docstring'''
return normalize(__SCREAMING_SNAKE_CASE , mean=__SCREAMING_SNAKE_CASE , std=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
    def lowercase_ ( self : int , images : Tuple , do_resize : List[Any] = None , size : Union[str, Any] = None , resample : int = None , do_center_crop : Tuple = None , crop_size : Any = None , do_rescale : Dict = None , rescale_factor : Tuple = None , do_normalize : List[Any] = None , image_mean : Tuple = None , image_std : str = None , return_tensors : Optional[int] = None , data_format : Dict = ChannelDimension.FIRST , **kwargs : Any , ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase__ : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase__ : Tuple = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase__ : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase__ : Optional[Any] = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase__ : Optional[int] = get_size_dict(__SCREAMING_SNAKE_CASE , param_name='''crop_size''' , default_to_square=__SCREAMING_SNAKE_CASE )
UpperCAmelCase__ : Tuple = resample if resample is not None else self.resample
UpperCAmelCase__ : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase__ : int = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase__ : str = image_std if image_std is not None else self.image_std
UpperCAmelCase__ : Optional[Any] = size if size is not None else self.size
UpperCAmelCase__ : Tuple = get_size_dict(__SCREAMING_SNAKE_CASE )
if not is_batched(__SCREAMING_SNAKE_CASE ):
UpperCAmelCase__ : str = [images]
if not valid_images(__SCREAMING_SNAKE_CASE ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
# All transformations expect numpy arrays.
UpperCAmelCase__ : Any = [to_numpy_array(__SCREAMING_SNAKE_CASE ) for image in images]
if do_resize:
UpperCAmelCase__ : Any = [self.resize(image=__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE , resample=__SCREAMING_SNAKE_CASE ) for image in images]
if do_center_crop:
UpperCAmelCase__ : Tuple = [self.center_crop(image=__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
UpperCAmelCase__ : int = [self.rescale(image=__SCREAMING_SNAKE_CASE , scale=__SCREAMING_SNAKE_CASE ) for image in images]
if do_normalize:
UpperCAmelCase__ : int = [self.normalize(image=__SCREAMING_SNAKE_CASE , mean=__SCREAMING_SNAKE_CASE , std=__SCREAMING_SNAKE_CASE ) for image in images]
UpperCAmelCase__ : Dict = [to_channel_dimension_format(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for image in images]
UpperCAmelCase__ : Dict = {'''pixel_values''': images}
return BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE )
| 75 |
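# A hedged NumPy-only sketch of the rescale/normalize steps the processor above
# applies; the scale and channel statistics here are illustrative, not the model's.
import numpy as np

def rescale_and_normalize(image: np.ndarray, scale: float, mean, std) -> np.ndarray:
    image = image * scale  # e.g. uint8 [0, 255] -> float [0, 1]
    return (image - np.array(mean)) / np.array(std)

img = np.full((2, 2, 3), 255, dtype=np.uint8)
out = rescale_and_normalize(img, 1 / 255, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
assert np.allclose(out, 1.0)  # 255 -> 1.0 -> (1.0 - 0.5) / 0.5 == 1.0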
'''simple docstring'''
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env (key : List[Any] , default : Tuple=False )-> Union[str, Any]:
    '''simple docstring'''
    try:
        _value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(_value )
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f'''If set, {key} must be yes or no.''' )
    return _value
_run_slow_tests = parse_flag_from_env('''RUN_SLOW''', default=False)
_run_remote_tests = parse_flag_from_env('''RUN_REMOTE''', default=False)
_run_local_tests = parse_flag_from_env('''RUN_LOCAL''', default=True)
_run_packaged_tests = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
    reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
    reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse('''0.3.2'''),
    reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == '''win32''',
    reason='''test should not be run on Windows''',
)
def require_faiss (test_case : List[str] )-> List[Any]:
    '''simple docstring'''
    try:
        import faiss # noqa
    except ImportError:
        test_case = unittest.skip('''test requires faiss''' )(test_case )
    return test_case
def require_regex (test_case : Union[str, Any] )-> List[str]:
    '''simple docstring'''
    try:
        import regex # noqa
    except ImportError:
        test_case = unittest.skip('''test requires regex''' )(test_case )
    return test_case
def require_elasticsearch (test_case : Union[str, Any] )-> Union[str, Any]:
    '''simple docstring'''
    try:
        import elasticsearch # noqa
    except ImportError:
        test_case = unittest.skip('''test requires elasticsearch''' )(test_case )
    return test_case
def require_sqlalchemy (test_case : Dict )-> List[Any]:
    '''simple docstring'''
    try:
        import sqlalchemy # noqa
    except ImportError:
        test_case = unittest.skip('''test requires sqlalchemy''' )(test_case )
    return test_case
def require_torch (test_case : List[str] )-> List[str]:
    '''simple docstring'''
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip('''test requires PyTorch''' )(test_case )
    return test_case
def require_tf (test_case : Optional[Any] )-> Dict:
    '''simple docstring'''
    if not config.TF_AVAILABLE:
        test_case = unittest.skip('''test requires TensorFlow''' )(test_case )
    return test_case
def require_jax (test_case : int )-> Any:
    '''simple docstring'''
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip('''test requires JAX''' )(test_case )
    return test_case
def require_pil (test_case : Tuple )-> str:
    '''simple docstring'''
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip('''test requires Pillow''' )(test_case )
    return test_case
def require_transformers (test_case : Dict )-> Any:
    '''simple docstring'''
    try:
        import transformers # noqa F401
    except ImportError:
        return unittest.skip('''test requires transformers''' )(test_case )
    else:
        return test_case
def require_tiktoken (test_case : int )-> Tuple:
    '''simple docstring'''
    try:
        import tiktoken # noqa F401
    except ImportError:
        return unittest.skip('''test requires tiktoken''' )(test_case )
    else:
        return test_case
def require_spacy (test_case : Dict )-> str:
    '''simple docstring'''
    try:
        import spacy # noqa F401
    except ImportError:
        return unittest.skip('''test requires spacy''' )(test_case )
    else:
        return test_case
def require_spacy_model (model : int )-> Dict:
    '''simple docstring'''
    def _require_spacy_model(test_case : int ):
        try:
            import spacy # noqa F401
            spacy.load(model )
        except ImportError:
            return unittest.skip('''test requires spacy''' )(test_case )
        except OSError:
            return unittest.skip('''test requires spacy model \'{}\''''.format(model ) )(test_case )
        else:
            return test_case
    return _require_spacy_model
def require_pyspark (test_case : str )-> Dict:
    '''simple docstring'''
    try:
        import pyspark # noqa F401
    except ImportError:
        return unittest.skip('''test requires pyspark''' )(test_case )
    else:
        return test_case
def require_joblibspark (test_case : Tuple )-> str:
    '''simple docstring'''
    try:
        import joblibspark # noqa F401
    except ImportError:
        return unittest.skip('''test requires joblibspark''' )(test_case )
    else:
        return test_case
def slow (test_case : Any )-> int:
    '''simple docstring'''
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip('''test is slow''' )(test_case )
    return test_case
def local (test_case : Any )-> Optional[Any]:
    '''simple docstring'''
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip('''test is local''' )(test_case )
    return test_case
def packaged (test_case : str )-> int:
    '''simple docstring'''
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip('''test is packaged''' )(test_case )
    return test_case
def remote (test_case : Optional[int] )-> str:
    '''simple docstring'''
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip('''test requires remote''' )(test_case )
    return test_case
def for_all_test_methods (*decorators : str )-> Optional[int]:
    '''simple docstring'''
    def decorate(cls : Optional[Any] ):
        for name, fn in cls.__dict__.items():
            if callable(fn ) and name.startswith('''test''' ):
                for decorator in decorators:
                    fn = decorator(fn )
                setattr(cls , name , fn )
        return cls
    return decorate
class RequestWouldHangIndefinitelyError ( Exception):
    pass
class OfflineSimulationMode ( Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline (mode : Dict=OfflineSimulationMode.CONNECTION_FAILS , timeout : Optional[int]=1E-16 )-> Tuple:
    '''simple docstring'''
    online_request = requests.Session().request
    def timeout_request (session : Any , method : str , url : str , **kwargs : Any ):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = '''https://10.255.255.1'''
        if kwargs.get('''timeout''' ) is None:
            raise RequestWouldHangIndefinitelyError(
                f'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.''' )
        kwargs['''timeout'''] = timeout
        try:
            return online_request(method , invalid_url , **kwargs )
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace('''10.255.255.1''' , f'''OfflineMock[{url}]''' ),)
            e.args = (max_retry_error,)
            raise
    def raise_connection_error (session : Union[str, Any] , prepared_request : Optional[int] , **kwargs : Dict ):
        raise requests.ConnectionError('''Offline mode is enabled.''' , request=prepared_request )
    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch('''requests.Session.send''' , raise_connection_error ):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch('''requests.Session.request''' , timeout_request ):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch('''datasets.config.HF_DATASETS_OFFLINE''' , True ):
            yield
    else:
        raise ValueError('''Please use a value from the OfflineSimulationMode enum.''' )
@contextmanager
def set_current_working_directory_to_temp_dir (*args : Union[str, Any] , **kwargs : List[str] )-> Any:
    '''simple docstring'''
    original_working_dir = str(Path().resolve() )
    with tempfile.TemporaryDirectory(*args , **kwargs ) as tmp_dir:
        try:
            os.chdir(tmp_dir )
            yield
        finally:
            os.chdir(original_working_dir )
@contextmanager
def assert_arrow_memory_increases ()-> Optional[int]:
    '''simple docstring'''
    import gc
    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def assert_arrow_memory_doesnt_increase ()-> List[Any]:
    '''simple docstring'''
    import gc
    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def is_rng_equal (rng1 : Any , rng2 : int )-> Any:
    '''simple docstring'''
    return deepcopy(rng1 ).integers(0 , 1_00 , 10 ).tolist() == deepcopy(rng2 ).integers(0 , 1_00 , 10 ).tolist()
def xfail_if_500_502_http_error (func : Union[str, Any] )-> List[Any]:
    '''simple docstring'''
    import decorator
    from requests.exceptions import HTTPError
    def _wrapper(func : int , *args : int , **kwargs : Optional[int] ):
        try:
            return func(*args , **kwargs )
        except HTTPError as err:
            if str(err ).startswith('''500''' ) or str(err ).startswith('''502''' ):
                pytest.xfail(str(err ) )
            raise err
    return decorator.decorator(_wrapper , func )
class _RunOutput :
    def __init__( self , returncode , stdout , stderr ) -> Dict:
        '''simple docstring'''
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream (stream : List[str] , callback : Union[str, Any] )-> Dict:
    '''simple docstring'''
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def _stream_subprocess (cmd : Tuple , env : Optional[int]=None , stdin : Optional[Any]=None , timeout : Tuple=None , quiet : Dict=False , echo : List[Any]=False )-> _RunOutput:
    '''simple docstring'''
    if echo:
        print('''\nRunning: ''' , ''' '''.join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line : int , sink : List[Any] , pipe : str , label : Dict="" ):
        line = line.decode('''utf-8''' ).rstrip()
        sink.append(line )
        if not quiet:
            print(label , line , file=pipe )
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout , lambda l : tee(l , out , sys.stdout , label='''stdout:''' ) ),
            _read_stream(p.stderr , lambda l : tee(l , err , sys.stderr , label='''stderr:''' ) ),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err )
def execute_subprocess_async (cmd : Optional[Any] , env : Any=None , stdin : List[str]=None , timeout : Optional[Any]=1_80 , quiet : Dict=False , echo : int=True )-> _RunOutput:
    '''simple docstring'''
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )
    cmd_str = ''' '''.join(cmd )
    if result.returncode > 0:
        stderr = '''\n'''.join(result.stderr )
        raise RuntimeError(
            f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
            f'''The combined stderr from workers follows:\n{stderr}''' )
    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f'''\'{cmd_str}\' produced no output.''' )
    return result
def pytest_xdist_worker_id ()-> Dict:
    '''simple docstring'''
    worker = os.environ.get('''PYTEST_XDIST_WORKER''' , '''gw0''' )
    worker = re.sub(R'''^gw''' , '''''' , worker , 0 , re.M )
    return int(worker )
def get_torch_dist_unique_port ()-> Tuple:
    '''simple docstring'''
    port = 2_95_00
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
| 24 | 0 |
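# Standalone sketch (our own names) of the env-flag gating pattern above: a boolean
# environment variable decides whether a whole family of tests is skipped.
import os
import unittest

def flag_from_env(key: str, default: bool = False) -> bool:
    value = os.environ.get(key)
    return default if value is None else value.strip().lower() in ("1", "yes", "true")

def require_flag(key: str):
    def decorator(test_case):
        if not flag_from_env(key):
            return unittest.skip(f"test requires {key}=1")(test_case)
        return test_case
    return decorator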
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class SCREAMING_SNAKE_CASE ( __lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase_ : Dict = VideoToVideoSDPipeline
UpperCamelCase_ : Dict = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({'''video'''} ) - {'''image''', '''width''', '''height'''}
UpperCamelCase_ : int = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''video'''} ) - {'''image'''}
UpperCamelCase_ : Optional[int] = PipelineTesterMixin.required_optional_params - {'''latents'''}
UpperCamelCase_ : Any = False
# No `output_type`.
UpperCamelCase_ : Union[str, Any] = frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def _A ( self : int ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : str = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=32 , attention_head_dim=4 , )
SCREAMING_SNAKE_CASE : str = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=__SCREAMING_SNAKE_CASE , set_alpha_to_one=__SCREAMING_SNAKE_CASE , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : str = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
SCREAMING_SNAKE_CASE : Tuple = CLIPTextModel(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : List[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
SCREAMING_SNAKE_CASE : str = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
    def _A ( self : Optional[Any] , device : Tuple , seed : int=0 ):
        video = floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "video": video,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
def _A ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE : Tuple = "cpu" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE : str = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Any = VideoToVideoSDPipeline(**__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Optional[int] = sd_pipe.to(__SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : List[Any] = "np"
SCREAMING_SNAKE_CASE : List[str] = sd_pipe(**__SCREAMING_SNAKE_CASE ).frames
SCREAMING_SNAKE_CASE : Tuple = frames[0][-3:, -3:, -1]
assert frames[0].shape == (32, 32, 3)
SCREAMING_SNAKE_CASE : str = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def _A ( self : List[str] ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__SCREAMING_SNAKE_CASE , expected_max_diff=5E-3 )
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def _A ( self : int ):
pass
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def _A ( self : str ):
pass
@unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." )
def _A ( self : Optional[int] ):
pass
def _A ( self : List[str] ):
return super().test_progress_bar()
@slow
@skip_mps
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _A ( self : List[Any] ):
SCREAMING_SNAKE_CASE : List[str] = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL" , torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
SCREAMING_SNAKE_CASE : List[Any] = torch.Generator(device="cpu" ).manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = torch.randn((1, 10, 3, 1024, 576) , generator=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Dict = video.to("cuda" )
SCREAMING_SNAKE_CASE : Optional[Any] = "Spiderman is surfing"
SCREAMING_SNAKE_CASE : List[Any] = pipe(__SCREAMING_SNAKE_CASE , video=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=3 , output_type="pt" ).frames
SCREAMING_SNAKE_CASE : List[str] = np.array([-1.0_458_984, -1.1_279_297, -0.9_663_086, -0.91_503_906, -0.75_097_656] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
| 62 |
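# Illustrative sketch of the seeded-generator pattern the pipeline tests above rely
# on for determinism; the tensor shape is arbitrary.
import torch

def seeded_noise(shape, seed: int = 0) -> torch.Tensor:
    generator = torch.Generator(device="cpu").manual_seed(seed)
    return torch.randn(shape, generator=generator)

a = seeded_noise((1, 3, 8, 8), seed=0)
b = seeded_noise((1, 3, 8, 8), seed=0)
assert torch.equal(a, b)  # same seed -> identical "random" tensors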
'''simple docstring'''
def partition (m : int )-> int:
    '''simple docstring'''
    memo = [[0 for _ in range(m )] for _ in range(m + 1 )]
    for i in range(m + 1 ):
        memo[i][0] = 1
    for n in range(m + 1 ):
        for k in range(1 , m ):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]
if __name__ == "__main__":
    import sys
    if len(sys.argv) == 1:
        try:
            n = int(input('''Enter a number: ''').strip())
            print(partition(n))
        except ValueError:
            print('''Please enter a number.''')
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print('''Please pass a number.''')
| 24 | 0 |
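# Independent brute-force cross-check (our own recursion) of the DP above, which
# counts the ways to write m as a sum of at least two positive parts.
from functools import lru_cache

@lru_cache(maxsize=None)
def partitions_with_max_part(n: int, k: int) -> int:
    if n == 0:
        return 1
    if n < 0 or k == 0:
        return 0
    return partitions_with_max_part(n - k, k) + partitions_with_max_part(n, k - 1)

assert partitions_with_max_part(5, 4) == 6  # p(5) = 7, minus the trivial "5" itself
assert partitions_with_max_part(100, 99) == 190569291  # the well-known p(100) - 1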
from __future__ import annotations
def generate_sum_of_subsets_soln ( nums : list[int] , max_sum : int ) -> list[list[int]]:
    '''simple docstring'''
    result : list[list[int]] = []
    path : list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums )
    create_state_space_tree(nums , max_sum , num_index , path , result , remaining_nums_sum )
    return result
def create_state_space_tree ( nums : list[int] , max_sum : int , num_index : int , path : list[int] , result : list[list[int]] , remaining_nums_sum : int , ) -> None:
    '''simple docstring'''
    if sum(path ) > max_sum or (remaining_nums_sum + sum(path )) < max_sum:
        return
    if sum(path ) == max_sum:
        result.append(path )
        return
    for index in range(num_index , len(nums ) ):
        create_state_space_tree(
            nums , max_sum , index + 1 , [*path, nums[index]] , result , remaining_nums_sum - nums[index] , )
nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
| 36 |
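# Brute-force cross-check (our own helper) of the backtracking search above,
# enumerating all subsets with itertools instead of pruning.
from itertools import combinations

def subsets_with_sum(nums, target):
    return [list(c) for r in range(1, len(nums) + 1)
            for c in combinations(nums, r) if sum(c) == target]

assert sorted(map(sorted, subsets_with_sum([3, 34, 4, 12, 5, 2], 9))) == [[2, 3, 4], [4, 5]]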
'''simple docstring'''
import argparse
import os
import re
PATH_TO_AUTO_MODULE = '''src/transformers/models/auto'''
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(R'''[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict''')
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(R'''\s*\(\s*"(\S[^"]+)"''')
def sort_auto_mapping (fname : List[Any] , overwrite : bool = False )-> str:
    '''simple docstring'''
    with open(fname , '''r''' , encoding='''utf-8''' ) as f:
        content = f.read()
    lines = content.split('''\n''' )
    new_lines = []
    line_idx = 0
    while line_idx < len(lines ):
        if _re_intro_mapping.search(lines[line_idx] ) is not None:
            indent = len(re.search(R'''^(\s*)\S''' , lines[line_idx] ).groups()[0] ) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(''' ''' * indent + '''(''' ):
                new_lines.append(lines[line_idx] )
                line_idx += 1
            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(''' ''' * indent + ''')''' ):
                        line_idx += 1
                    blocks.append('''\n'''.join(lines[start_idx : line_idx + 1] ) )
                else:
                    blocks.append(lines[line_idx] )
                    line_idx += 1
            # Sort blocks by their identifiers
            blocks = sorted(blocks , key=lambda block : _re_identifier.search(block ).groups()[0] )
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx] )
            line_idx += 1
    if overwrite:
        with open(fname , '''w''' , encoding='''utf-8''' ) as f:
            f.write('''\n'''.join(new_lines ) )
    elif "\n".join(new_lines ) != content:
        return True
def sort_all_auto_mappings (overwrite : bool = False )-> Tuple:
    '''simple docstring'''
    fnames = [os.path.join(PATH_TO_AUTO_MODULE , f ) for f in os.listdir(PATH_TO_AUTO_MODULE ) if f.endswith('''.py''' )]
    diffs = [sort_auto_mapping(fname , overwrite=overwrite ) for fname in fnames]
    if not overwrite and any(diffs ):
        failures = [f for f, d in zip(fnames , diffs ) if d]
        raise ValueError(
            f'''The following files have auto mappings that need sorting: {", ".join(failures )}. Run `make style` to fix'''
            ''' this.''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
    args = parser.parse_args()
    sort_all_auto_mappings(not args.check_only)
| 24 | 0 |
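# A tiny standalone illustration (our own sample entries) of the sort key used
# above: mapping entries are ordered by the first quoted identifier they contain.
import re

_re_identifier_demo = re.compile(r'\s*\(\s*"(\S[^"]+)"')

entries = ['    ("bert", "BertModel"),', '    ("albert", "AlbertModel"),']
entries.sort(key=lambda line: _re_identifier_demo.search(line).groups()[0])
assert entries[0].strip().startswith('("albert"')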
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowercase__( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
a :Union[str, Any] = CycleDiffusionPipeline
a :Dict = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'''negative_prompt''',
'''height''',
'''width''',
'''negative_prompt_embeds''',
}
a :Union[str, Any] = PipelineTesterMixin.required_optional_params - {'''latents'''}
a :List[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'source_prompt'} )
a :Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
a :Tuple = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _lowercase ( self : Optional[int] ) -> Union[str, Any]:
torch.manual_seed(0 )
lowercase_ = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , )
lowercase_ = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , num_train_timesteps=1_0_0_0 , clip_sample=__SCREAMING_SNAKE_CASE , set_alpha_to_one=__SCREAMING_SNAKE_CASE , )
torch.manual_seed(0 )
lowercase_ = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
lowercase_ = CLIPTextModel(__SCREAMING_SNAKE_CASE )
lowercase_ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowercase_ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
    def _lowercase ( self : List[Any] , device : List[str] , seed : Optional[Any]=0 ) -> Optional[Any]:
        image = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(seed ) ).to(device )
        image = image / 2 + 0.5
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''prompt''': '''An astronaut riding an elephant''',
            '''source_prompt''': '''An astronaut riding a horse''',
            '''image''': image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''eta''': 0.1,
            '''strength''': 0.8,
            '''guidance_scale''': 3,
            '''source_guidance_scale''': 1,
            '''output_type''': '''numpy''',
        }
        return inputs
def _lowercase ( self : Optional[Any] ) -> Dict:
lowercase_ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase_ = self.get_dummy_components()
lowercase_ = CycleDiffusionPipeline(**__SCREAMING_SNAKE_CASE )
lowercase_ = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
lowercase_ = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
lowercase_ = pipe(**__SCREAMING_SNAKE_CASE )
lowercase_ = output.images
lowercase_ = images[0, -3:, -3:, -1]
assert images.shape == (1, 3_2, 3_2, 3)
lowercase_ = np.array([0.44_59, 0.49_43, 0.45_44, 0.66_43, 0.54_74, 0.43_27, 0.57_01, 0.59_59, 0.51_79] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def _lowercase ( self : Any ) -> Optional[Any]:
lowercase_ = self.get_dummy_components()
for name, module in components.items():
if hasattr(__SCREAMING_SNAKE_CASE , '''half''' ):
lowercase_ = module.half()
lowercase_ = CycleDiffusionPipeline(**__SCREAMING_SNAKE_CASE )
lowercase_ = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
lowercase_ = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
lowercase_ = pipe(**__SCREAMING_SNAKE_CASE )
lowercase_ = output.images
lowercase_ = images[0, -3:, -3:, -1]
assert images.shape == (1, 3_2, 3_2, 3)
lowercase_ = np.array([0.35_06, 0.45_43, 0.4_46, 0.45_75, 0.51_95, 0.41_55, 0.52_73, 0.5_18, 0.41_16] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def _lowercase ( self : Optional[Any] ) -> List[str]:
return super().test_save_load_local()
@unittest.skip('''non-deterministic pipeline''' )
def _lowercase ( self : Union[str, Any] ) -> Optional[Any]:
return super().test_inference_batch_single_identical()
@skip_mps
def _lowercase ( self : Optional[Any] ) -> List[str]:
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _lowercase ( self : Optional[Any] ) -> Any:
return super().test_save_load_optional_components()
@skip_mps
def _lowercase ( self : str ) -> Tuple:
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16"
        )

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)

        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1
    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)

        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        assert np.abs(image - expected_image).max() < 2e-2
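# Note: CycleDiffusion edits a real input image by inverting it under `source_prompt`
# (a caption describing the image as it is) and re-generating it under `prompt`
# (a caption describing the desired edit). That is why both prompts and a DDIM
# scheduler are required, as exercised by the tests above.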
| 97 |
'''simple docstring'''
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn: bool = True, stacklevel: int = 2):
    """Emit a FutureWarning for each (attribute, version, message) tuple and pop deprecated kwargs."""
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
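# A minimal usage sketch (`num_layers` below is a hypothetical deprecated argument,
# not part of any real diffusers API): callers route deprecated kwargs through
# `take_from` so the value is popped, a FutureWarning is emitted, and the old value
# can still be honored during the deprecation window.
def _example_deprecate_usage(**kwargs):
    num_layers = deprecate("num_layers", "999.0.0", "Use `depth` instead.", take_from=kwargs)
    return num_layers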
| 24 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json''',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(self, vocab_size=50432, hidden_size=6144, num_hidden_layers=44, num_attention_heads=64,
                 intermediate_size=24576, hidden_act="gelu", rotary_pct=0.25, rotary_emb_base=10000,
                 attention_dropout=0.0, hidden_dropout=0.0, classifier_dropout=0.1, max_position_embeddings=2048,
                 initializer_range=0.02, layer_norm_eps=1e-5, use_cache=True, bos_token_id=0, eos_token_id=2,
                 tie_word_embeddings=False, use_parallel_residual=True, rope_scaling=None, **kwargs):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
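# A minimal construction sketch (hypothetical tiny hyper-parameters, not a released
# checkpoint): `rope_scaling` must carry exactly a "type" in {"linear", "dynamic"}
# and a float "factor" > 1.0, otherwise the validation above raises.
def _example_gpt_neox_config():
    return GPTNeoXConfig(
        vocab_size=1024,
        hidden_size=64,
        num_hidden_layers=2,
        num_attention_heads=4,
        rope_scaling={"type": "linear", "factor": 2.0},
    )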
| 458 |
'''simple docstring'''
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)

            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("norm1", "layernorm1")
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("norm2", "layernorm2")
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("fc1", "linear_in")
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("fc2", "linear_out")

            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name
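# A small illustration of the mapping above (hypothetical tensor names, assuming
# num_meta4D_last_stage=5), traced through the branches:
#   rename_key("patch_embed.0.weight", 5)
#       -> "efficientformer.patch_embed.convolution1.weight"
#   rename_key("network.0.0.fc1.weight", 5)
#       -> "efficientformer.encoder.intermediate_stages.0.meta4D_layers.blocks.0.convolution1.weight"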
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    # rewrite every key in the checkpoint in place
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val
    return checkpoint
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(
    checkpoint_path: Path, efficientformer_config_file: Path, pytorch_dump_path: Path, push_to_hub: bool
):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)

    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")
        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
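# Example invocation (hypothetical local paths, shown only for orientation):
#   python convert_efficientformer_original_pytorch_checkpoint_to_pytorch.py \
#       --pytorch_model_path efficientformer_l1_300d.pth \
#       --config_file efficientformer_l1_config.json \
#       --pytorch_dump_path efficientformer-l1-300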
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''',
default=None,
type=str,
required=True,
help='''Path to EfficientFormer pytorch checkpoint.''',
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for EfficientFormer model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 24 | 0 |
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    """
    Zero-shot image classification pipeline using CLIP-like checkpoints: it predicts the class of an image
    from a set of candidate labels supplied at call time.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
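# A minimal usage sketch (assumes network access to fetch the checkpoint;
# "openai/clip-vit-base-patch32" is a public CLIP checkpoint, while the image
# path is hypothetical):
def _example_zero_shot_image_classification():
    from transformers import pipeline

    classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
    return classifier("cat.png", candidate_labels=["cat", "dog", "car"])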
| 7 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400,
                 do_resize=True, size=None, do_center_crop=True, crop_size=None, do_flip_channel_order=True):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 24 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
logger = logging.get_logger(__name__)
class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
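# Migration sketch: existing code can keep instantiating YolosFeatureExtractor (and
# will see the FutureWarning above), but new code should construct YolosImageProcessor
# directly, e.g. `YolosImageProcessor.from_pretrained("hustvl/yolos-small")` -- the
# checkpoint name here is illustrative.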
| 120 |
'''simple docstring'''
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    """A dataset reader that reads from a Spark DataFrame."""

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, **kwargs
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs)

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
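# A minimal usage sketch (assumes a running SparkSession; the DataFrame below is
# illustrative, not taken from any real workload):
def _example_spark_dataset_reader():
    from pyspark.sql import SparkSession

    spark = SparkSession.builder.getOrCreate()
    df = spark.createDataFrame([("hello",), ("world",)], ["text"])
    return SparkDatasetReader(df, streaming=False).read()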
| 24 | 0 |
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
    from transformers import Pix2StructImageProcessor
class Pix2StructImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400,
                 size=None, do_normalize=True, do_convert_rgb=True, patch_size=None):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()

        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048

        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # the +2 accounts for the row and column index features prepended to each flattened patch
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
    def test_call_vqa(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        image_processor.is_vqa = True

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input: in VQA mode a header text is required
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"

            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input: the extra (alpha) channel is dropped by RGB conversion
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
| 393 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" LED tokenizer (backed by HuggingFace's *tokenizers* library), derived from the GPT-2
    byte-level Byte-Pair-Encoding tokenizer.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>",
                 eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>",
                 mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token,
            eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token,
            mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. include the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
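# A minimal sketch of how `global_attention_mask` interacts with `_pad` (the input
# below is hypothetical): positions flagged with 1 receive LED's global attention,
# and padded positions are filled with -1 rather than 0, because 0 already means
# "local attention".
def _example_led_global_attention_padding():
    tokenizer = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
    inputs = tokenizer(["A long document"], return_tensors="pt")
    # give global attention to the first token only
    inputs["global_attention_mask"] = [[1] + [0] * (inputs["input_ids"].shape[1] - 1)]
    return inputs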
| 24 | 0 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    Text2TextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)
    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )
    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
| 552 |
'''simple docstring'''
from collections import deque
def tarjan(g):
    """
    Tarjan's algorithm for finding the strongly connected components of a directed
    graph, given as an adjacency list `g`.
    """

    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)

    return components


def create_graph(n, edges):
    """Build an adjacency list with `n` vertices from a list of (u, v) edge pairs."""
    g = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g
if __name__ == "__main__":
# Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
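    # Tarjan visits every vertex and every edge once, so the algorithm runs in
    # O(V + E) time; components come out in reverse topological order of the
    # condensation graph (here the sinks [5], [6], [4] before the cycle {0, 1, 2, 3}).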
| 24 | 0 |
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True,
                 use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5,
                 num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
                 type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            is_decoder=False, initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000

        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )

        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 457 |
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):

    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self) -> Dict:
        '''simple docstring'''
        super().setUp()
        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer
    def test_convert_token_and_id(self) -> Dict:
        '''simple docstring'''
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self) -> Optional[int]:
        '''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 10_1122)
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_1122 )
    @require_torch
    def test_prepare_batch(self) -> Tuple:
        '''simple docstring'''
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 7_0307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)
    def test_rust_and_python_full_tokenizers(self) -> Optional[Any]:
        '''simple docstring'''
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
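        # fmt: off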
__snake_case = {'''input_ids''': [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
__snake_case = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
expected_encoding=__SCREAMING_SNAKE_CASE , model_name='''moussaKam/mbarthez''' , revision='''c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6''' , sequences=__SCREAMING_SNAKE_CASE , )
| 24 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """simple docstring"""

    image_processing_class = LevitImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
def lowerCAmelCase_ ( self : Optional[Any] ):
pass
def lowerCAmelCase_ ( self : Any ):
a__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE ,Image.Image )
# Test not batched input
a__ = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
# Test batched
a__ = image_processing(__SCREAMING_SNAKE_CASE ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
def lowerCAmelCase_ ( self : Union[str, Any] ):
a__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__SCREAMING_SNAKE_CASE ,numpify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE ,np.ndarray )
# Test not batched input
a__ = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
# Test batched
a__ = image_processing(__SCREAMING_SNAKE_CASE ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
def lowerCAmelCase_ ( self : Optional[Any] ):
a__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__SCREAMING_SNAKE_CASE ,torchify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE ,torch.Tensor )
# Test not batched input
a__ = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
# Test batched
a__ = image_processing(__SCREAMING_SNAKE_CASE ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
| 331 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self) -> Union[str, Any]:
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self) -> int:
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self) -> int:
        '''simple docstring'''
        self.model_tester = FlaxRoFormerModelTester(self)
@slow
    def test_model_from_pretrained(self) -> List[Any]:
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''junnyu/roformer_chinese_small''', from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class lowerCAmelCase ( unittest.TestCase):
@slow
    def test_inference_masked_lm(self) -> Tuple:
        '''simple docstring'''
        model = FlaxRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''')
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        vocab_size = 5_0000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]])
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1E-4))
| 24 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        '''latents''',
        '''num_images_per_prompt''',
        '''callback''',
        '''callback_steps''',
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        '''simple docstring'''
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16, num_layers=2, patch_size=4, attention_head_dim=8, num_attention_heads=2, in_channels=4, out_channels=8, attention_bias=True, activation_fn='''gelu-approximate''', num_embeds_ada_norm=1_000, norm_type='''ada_norm_zero''', norm_elementwise_affine=False, )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {'''transformer''': transformer.eval(), '''vae''': vae.eval(), '''scheduler''': scheduler}
        return components
    def get_dummy_inputs(self, device, seed=0):
        '''simple docstring'''
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            '''class_labels''': [1],
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = '''cpu'''
UpperCAmelCase__ : int = self.get_dummy_components()
UpperCAmelCase__ : Optional[Any] = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
UpperCAmelCase__ : int = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
UpperCAmelCase__ : str = pipe(**__SCREAMING_SNAKE_CASE ).images
UpperCAmelCase__ : Tuple = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
UpperCAmelCase__ : Optional[int] = np.array([0.2_9_4_6, 0.6_6_0_1, 0.4_3_2_9, 0.3_2_9_6, 0.4_1_4_4, 0.5_3_1_9, 0.7_2_7_3, 0.5_0_1_3, 0.4_4_5_7] )
UpperCAmelCase__ : str = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__SCREAMING_SNAKE_CASE , 1e-3 )
def lowercase_ ( self : str ):
'''simple docstring'''
self._test_inference_batch_single_identical(relax_max_difference=__SCREAMING_SNAKE_CASE , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class lowerCamelCase_ ( unittest.TestCase ):
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = torch.manual_seed(0 )
UpperCAmelCase__ : int = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-256''' )
pipe.to('''cuda''' )
UpperCAmelCase__ : Optional[int] = ['''vase''', '''umbrella''', '''white shark''', '''white wolf''']
UpperCAmelCase__ : Optional[int] = pipe.get_label_ids(__SCREAMING_SNAKE_CASE )
UpperCAmelCase__ : Union[str, Any] = pipe(__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=40 , output_type='''np''' ).images
for word, image in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
UpperCAmelCase__ : str = load_numpy(
f"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" )
assert np.abs((expected_image - image).max() ) < 1e-2
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-512''' )
UpperCAmelCase__ : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('''cuda''' )
UpperCAmelCase__ : Union[str, Any] = ['''vase''', '''umbrella''']
UpperCAmelCase__ : str = pipe.get_label_ids(__SCREAMING_SNAKE_CASE )
UpperCAmelCase__ : str = torch.manual_seed(0 )
UpperCAmelCase__ : List[str] = pipe(__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=25 , output_type='''np''' ).images
for word, image in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
UpperCAmelCase__ : Dict = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
f"""/dit/{word}_512.npy""" )
assert np.abs((expected_image - image).max() ) < 1e-1
| 75 |
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    '''simple docstring'''
    url = '''https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'''
    image = Image.open(requests.get(url, stream=True).raw).convert('''RGB''')
    return image
def create_rename_keys(config):
    '''simple docstring'''
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    '''simple docstring'''
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    '''simple docstring'''
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''')
        v_bias = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''')

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f'''visual_encoder.blocks.{i}.attn.qkv.bias'''] = qkv_bias
def get_blip2_config(model_name, eos_token_id=None):
    '''simple docstring'''
    image_size = 3_64 if '''coco''' in model_name else 2_24
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained('''facebook/opt-2.7b''', eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained('''facebook/opt-6.7b''', eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained('''google/flan-t5-xl''', dense_act_fn='''gelu''', bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained('''google/flan-t5-xxl''', dense_act_fn='''gelu''', bos_token_id=1).to_dict()

    config = Blip2Config(vision_config=vision_config, text_config=text_config)
    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    '''simple docstring'''
    tokenizer = (
        AutoTokenizer.from_pretrained('''facebook/opt-2.7b''')
        if '''opt''' in model_name
        else AutoTokenizer.from_pretrained('''google/flan-t5-xl''')
    )
    eos_token_id = tokenizer('''\n''', add_special_tokens=False).input_ids[0]
    config, image_size = get_blip2_config(model_name, eos_token_id=eos_token_id)
    hf_model = Blip2ForConditionalGeneration(config).eval()

    model_name_to_original = {
        '''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''),
        '''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''),
        '''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''),
        '''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''),
        '''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''),
        '''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''),
        '''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''),
    }
    name, type = model_name_to_original[model_name]

    # load original model
    print('''Loading original model...''')
    device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=device)
    original_model.eval()
    print('''Done!''')

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith('''Qformer.bert'''):
            key = key.replace('''Qformer.bert''', '''qformer''')
        if "attention.self" in key:
            key = key.replace('''self''', '''attention''')
        if "opt_proj" in key:
            key = key.replace('''opt_proj''', '''language_projection''')
        if "t5_proj" in key:
            key = key.replace('''t5_proj''', '''language_projection''')
        if key.startswith('''opt'''):
            key = key.replace('''opt''', '''language''')
        if key.startswith('''t5'''):
            key = key.replace('''t5''', '''language''')
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors['''eval'''](image).unsqueeze(0).to(device)
    input_ids = tokenizer(['''\n'''], return_tensors='''pt''').input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={'''height''': image_size, '''width''': image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD)
    processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors='''pt''').pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(original_pixel_values, pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({'''image''': original_pixel_values, '''text_input''': ['''''']}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']}).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -1_00)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print('''First values of original logits:''', original_logits[0, :3, :3])
    print('''First values of HF logits:''', logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device)
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1E-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device)
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1E-2)
    print('''Looks ok!''')

    print('''Generating a caption...''')
    prompt = ''''''
    input_ids = tokenizer(prompt, return_tensors='''pt''').input_ids.to(device)

    original_outputs = original_model.generate({'''image''': original_pixel_values})
    outputs = hf_model.generate(
        pixel_values, input_ids, do_sample=False, num_beams=5, max_length=30, min_length=1, top_p=0.9, repetition_penalty=1.0, length_penalty=1.0, temperature=1, )
    print('''Original generation:''', original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print('''HF generation:''', output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f'''nielsr/{model_name}''')
        hf_model.push_to_hub(f'''nielsr/{model_name}''')
if __name__ == "__main__":
UpperCAmelCase_ : Any = argparse.ArgumentParser()
UpperCAmelCase_ : Tuple = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
UpperCAmelCase_ : List[str] = parser.parse_args()
    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 24 | 0 |
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray
def polar_force(magnitude, angle, radian_mode=False):
    """simple docstring"""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]
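

# Worked example (illustrative, not part of the original file): a 10 N force at
# 45 degrees decomposes into roughly [7.07, 7.07], i.e. equal x and y components.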
def in_static_equilibrium(forces, location, eps=10**-1):
    """simple docstring"""
    moments = cross(location, forces)
    sum_moments = sum(moments)
    return abs(sum_moments) < eps
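

# Minimal sanity-check sketch (illustrative): two equal and opposite forces
# applied at the same point produce zero net moment, so the system is reported
# as being in static equilibrium:
#
#   assert in_static_equilibrium(
#       array([polar_force(5.0, 0), polar_force(5.0, 180)]), array([[0, 0], [0, 0]])
#   )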
if __name__ == "__main__":
# Test to check if it works
snake_case = array(
[
polar_force(718.4, 180 - 30),
polar_force(879.54, 45),
polar_force(100, -90),
]
)
snake_case = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
snake_case = array(
[
polar_force(30 * 9.81, 15),
polar_force(215, 180 - 45),
polar_force(264, 90 - 30),
]
)
snake_case = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
snake_case = array([[0, -2_000], [0, -1_200], [0, 15_600], [0, -12_400]])
snake_case = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
| 62 |
'''simple docstring'''
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
UpperCAmelCase_ : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
def _UpperCamelCase (_lowerCamelCase : Union[List, PIL.Image.Image, torch.Tensor] )-> Optional[Any]:
'''simple docstring'''
warnings.warn(
'''The preprocess method is deprecated and will be removed in a future version. Please'''
        ''' use VaeImageProcessor.preprocess instead''' , FutureWarning , )
if isinstance(_lowerCamelCase , torch.Tensor ):
return image
elif isinstance(_lowerCamelCase , PIL.Image.Image ):
__snake_case = [image]
if isinstance(image[0] , PIL.Image.Image ):
__snake_case , __snake_case = image[0].size
__snake_case , __snake_case = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
__snake_case = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
__snake_case = np.concatenate(_lowerCamelCase , axis=0 )
        __snake_case = np.array(_lowerCamelCase ).astype(np.float32 ) / 255.0
__snake_case = image.transpose(0 , 3 , 1 , 2 )
__snake_case = 2.0 * image - 1.0
__snake_case = torch.from_numpy(_lowerCamelCase )
elif isinstance(image[0] , torch.Tensor ):
__snake_case = torch.cat(_lowerCamelCase , dim=0 )
return image
def _UpperCamelCase (_lowerCamelCase : Union[List, PIL.Image.Image, torch.Tensor] )-> Optional[Any]:
'''simple docstring'''
if isinstance(_lowerCamelCase , torch.Tensor ):
return mask
elif isinstance(_lowerCamelCase , PIL.Image.Image ):
__snake_case = [mask]
if isinstance(mask[0] , PIL.Image.Image ):
__snake_case , __snake_case = mask[0].size
__snake_case , __snake_case = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
__snake_case = [np.array(m.convert('''L''' ).resize((w, h) , resample=PIL_INTERPOLATION['''nearest'''] ) )[None, :] for m in mask]
__snake_case = np.concatenate(_lowerCamelCase , axis=0 )
        __snake_case = mask.astype(np.float32 ) / 255.0
__snake_case = 0
__snake_case = 1
__snake_case = torch.from_numpy(_lowerCamelCase )
elif isinstance(mask[0] , torch.Tensor ):
__snake_case = torch.cat(_lowerCamelCase , dim=0 )
return mask
class RePaintPipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: RePaintScheduler
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
super().__init__()
self.register_modules(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
@torch.no_grad()
    @torch.no_grad()
    def __call__(
        self,
        image,
        mask_image,
        num_inference_steps = 250,
        eta = 0.0,
        jump_length = 10,
        jump_n_sample = 10,
        generator = None,
        output_type = "pil",
        return_dict = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        '''simple docstring'''
        original_image = image
        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)
        batch_size = original_image.shape[0]

        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                F'''You have passed a list of generators of length {len(generator)}, but requested an effective batch'''
                F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )

        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta

        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
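
    # Hedged usage sketch (assuming this class corresponds to diffusers'
    # RePaintPipeline; the checkpoint name is illustrative):
    #
    #   pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256")
    #   result = pipe(image=init_image, mask_image=mask, num_inference_steps=250).images[0]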
| 24 | 0 |
from sklearn.metrics import f1_score
import datasets
__lowercase : Dict = '''
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
'''
__lowercase : Union[str, Any] = '''
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{\'f1\': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results[\'f1\'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results[\'f1\'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
>>> print(round(results[\'f1\'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'f1\': array([0.8, 0. , 0. ])}
'''
__lowercase : List[Any] = '''
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
'''simple docstring'''
    def _info(self):
        '''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) ,reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"""] ,)
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        '''simple docstring'''
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
        return {"f1": float(score) if score.size == 1 else score}
| 36 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
UpperCAmelCase_ : str = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
def __init__( self , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
    def __call__(self, images, **kwargs) -> List[Any]:
        '''simple docstring'''
        return super().__call__(images, **kwargs)
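
        # Hedged usage sketch (model name is illustrative):
        #
        #   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
        #   classifier("cat.png", candidate_labels=["cat", "dog", "car"])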
    def _sanitize_parameters(self, **kwargs) -> Tuple:
        '''simple docstring'''
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params['''candidate_labels'''] = kwargs['''candidate_labels''']
        if "hypothesis_template" in kwargs:
            preprocess_params['''hypothesis_template'''] = kwargs['''hypothesis_template''']
        return preprocess_params, {}, {}
    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}.") -> Optional[Any]:
        '''simple docstring'''
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs['''candidate_labels'''] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs['''text_inputs'''] = [text_inputs]
        return inputs
    def _forward(self, model_inputs) -> Dict:
        '''simple docstring'''
        candidate_labels = model_inputs.pop('''candidate_labels''')
        text_inputs = model_inputs.pop('''text_inputs''')
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**model_inputs, **text_inputs)

        model_outputs = {
            '''candidate_labels''': candidate_labels,
            '''logits''': outputs.logits_per_image,
        }
        return model_outputs
    def postprocess(self, model_outputs) -> str:
        '''simple docstring'''
        candidate_labels = model_outputs.pop('''candidate_labels''')
        logits = model_outputs['''logits'''][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(F'''Unsupported framework: {self.framework}''')

        result = [
            {'''score''': score, '''label''': candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
| 24 | 0 |
import os
def solution(filename: str = "matrix.txt") -> int:
    '''simple docstring'''
    with open(os.path.join(os.path.dirname(__file__), filename)) as in_file:
        data = in_file.read()

    grid = [[int(cell) for cell in row.split(''',''')] for row in data.strip().splitlines()]
    n = len(grid[0])
    dp = [[0 for i in range(n)] for j in range(n)]
    dp[0][0] = grid[0][0]
    for i in range(1, n):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1, n):
        dp[i][0] = grid[i][0] + dp[i - 1][0]
    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])
    return dp[-1][-1]
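

# Worked example (illustrative, not part of the original file): for the grid
# [[1, 3], [2, 4]] the cheapest top-left -> bottom-right path is 1 -> 2 -> 4,
# so the DP recurrence yields dp[-1][-1] == 7.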
if __name__ == "__main__":
print(f"{solution() = }")
| 97 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCAmelCase_ : List[str] = {
'''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : int = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 24 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class CvtConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = '''cvt'''

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 1_92, 3_84],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        **kwargs,
    ) -> int:
        """simple docstring"""
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
| 458 |
'''simple docstring'''
def sum_of_digits(n: int) -> int:
    '''simple docstring'''
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    '''simple docstring'''
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits(n // 10)


def sum_of_digits_compact(n: int) -> int:
    '''simple docstring'''
    return sum(int(c) for c in str(abs(n)))
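

# Worked example (illustrative, not part of the original file): all three
# implementations agree, e.g. sum_of_digits(262144) == 2 + 6 + 2 + 1 + 4 + 4 == 19.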
def benchmark() -> None:
    '''simple docstring'''
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f'''{func.__name__}({value})'''
        timing = timeit(f'''__main__.{call}''', setup='''import __main__''')
        print(f'''{call:56} = {func(value)} -- {timing:.4f} seconds''')

    for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 24 | 0 |
"""simple docstring"""
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3  # noqa: F401
def _create_iam_role_for_sagemaker(role_name) -> None:
    '''simple docstring'''
    iam_client = boto3.client('iam' )
    sagemaker_trust_policy = {
'Version': '2012-10-17',
'Statement': [
{'Effect': 'Allow', 'Principal': {'Service': 'sagemaker.amazonaws.com'}, 'Action': 'sts:AssumeRole'}
],
}
try:
# create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name , AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy , indent=2 ) )
        policy_document = {
'Version': '2012-10-17',
'Statement': [
{
'Effect': 'Allow',
'Action': [
'sagemaker:*',
'ecr:GetDownloadUrlForLayer',
'ecr:BatchGetImage',
'ecr:BatchCheckLayerAvailability',
'ecr:GetAuthorizationToken',
'cloudwatch:PutMetricData',
'cloudwatch:GetMetricData',
'cloudwatch:GetMetricStatistics',
'cloudwatch:ListMetrics',
'logs:CreateLogGroup',
'logs:CreateLogStream',
'logs:DescribeLogStreams',
'logs:PutLogEvents',
'logs:GetLogEvents',
's3:CreateBucket',
's3:ListBucket',
's3:GetBucketLocation',
's3:GetObject',
's3:PutObject',
],
'Resource': '*',
}
],
}
# attach policy to role
iam_client.put_role_policy(
            RoleName=role_name , PolicyName=f'''{role_name}_policy_permission''' , PolicyDocument=json.dumps(policy_document , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(F'''role {role_name} already exists. Using existing one''' )
def _get_iam_role_arn(role_name) -> Any:
    '''simple docstring'''
    iam_client = boto3.client('iam' )
    return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]
def get_sagemaker_input():
'''simple docstring'''
_A = _ask_options(
'How do you want to authorize?' , ['AWS Profile', 'Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) '] , _lowerCamelCase , )
_A = None
if credentials_configuration == 0:
_A = _ask_field('Enter your AWS Profile name: [default] ' , default='default' )
_A = aws_profile
else:
print(
'Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,'
'`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`' )
_A = _ask_field('AWS Access Key ID: ' )
_A = aws_access_key_id
_A = _ask_field('AWS Secret Access Key: ' )
_A = aws_secret_access_key
_A = _ask_field('Enter your AWS Region: [us-east-1]' , default='us-east-1' )
_A = aws_region
_A = _ask_options(
'Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?' , ['Provide IAM Role name', 'Create new IAM role using credentials'] , _lowerCamelCase , )
if role_management == 0:
_A = _ask_field('Enter your IAM role name: ' )
else:
_A = 'accelerate_sagemaker_execution_role'
print(F'''Accelerate will create an iam role "{iam_role_name}" using the provided credentials''' )
_create_iam_role_for_sagemaker(_lowerCamelCase )
_A = _ask_field(
'Do you want to use custom Docker image? [yes/NO]: ' , _convert_yes_no_to_bool , default=_lowerCamelCase , error_message='Please enter yes or no.' , )
_A = None
if is_custom_docker_image:
        _A = _ask_field('Enter your Docker image: ' , lambda x : str(x ).lower() )
_A = _ask_field(
'Do you want to provide SageMaker input channels with data locations? [yes/NO]: ' , _convert_yes_no_to_bool , default=_lowerCamelCase , error_message='Please enter yes or no.' , )
_A = None
if is_sagemaker_inputs_enabled:
_A = _ask_field(
            'Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ' , lambda x : str(x ).lower() , )
_A = _ask_field(
'Do you want to enable SageMaker metrics? [yes/NO]: ' , _convert_yes_no_to_bool , default=_lowerCamelCase , error_message='Please enter yes or no.' , )
_A = None
if is_sagemaker_metrics_enabled:
_A = _ask_field(
'Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ' , lambda _snake_case : str(_lowerCamelCase ).lower() , )
_A = _ask_options(
'What is the distributed mode?' , ['No distributed training', 'Data parallelism'] , _convert_sagemaker_distributed_mode , )
_A = {}
_A = _ask_field(
'Do you wish to optimize your script with torch dynamo?[yes/NO]:' , _convert_yes_no_to_bool , default=_lowerCamelCase , error_message='Please enter yes or no.' , )
if use_dynamo:
_A = 'dynamo_'
_A = _ask_options(
'Which dynamo backend would you like to use?' , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
_A = _ask_field(
'Do you want to customize the defaults sent to torch.compile? [yes/NO]: ' , _convert_yes_no_to_bool , default=_lowerCamelCase , error_message='Please enter yes or no.' , )
if use_custom_options:
_A = _ask_options(
'Which mode do you want to use?' , _lowerCamelCase , lambda _snake_case : TORCH_DYNAMO_MODES[int(_lowerCamelCase )] , default='default' , )
_A = _ask_field(
'Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ' , _convert_yes_no_to_bool , default=_lowerCamelCase , error_message='Please enter yes or no.' , )
_A = _ask_field(
'Do you want to enable dynamic shape tracing? [yes/NO]: ' , _convert_yes_no_to_bool , default=_lowerCamelCase , error_message='Please enter yes or no.' , )
_A = 'Which EC2 instance type do you want to use for your training?'
if distributed_type != SageMakerDistributedType.NO:
_A = _ask_options(
_lowerCamelCase , _lowerCamelCase , lambda _snake_case : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(_lowerCamelCase )] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
_A = _ask_field(_lowerCamelCase , lambda _snake_case : str(_lowerCamelCase ).lower() , default='ml.p3.2xlarge' )
_A = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
_A = _ask_field(
'How many machines do you want to use? [1]: ' , _lowerCamelCase , default=1 , )
_A = _ask_options(
'Do you wish to use FP16 or BF16 (mixed precision)?' , ['no', 'fp16', 'bf16', 'fp8'] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
'Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.' )
return SageMakerConfig(
image_uri=_lowerCamelCase , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=_lowerCamelCase , use_cpu=_lowerCamelCase , dynamo_config=_lowerCamelCase , eca_instance_type=_lowerCamelCase , profile=_lowerCamelCase , region=_lowerCamelCase , iam_role_name=_lowerCamelCase , mixed_precision=_lowerCamelCase , num_machines=_lowerCamelCase , sagemaker_inputs_file=_lowerCamelCase , sagemaker_metrics_file=_lowerCamelCase , )
| 7 |
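A minimal sketch of the kind of yes/no converter the questionnaire above relies on. This is an assumption for illustration only, not accelerate's actual `_convert_yes_no_to_bool` implementation:

def convert_yes_no_to_bool(value: str) -> bool:
    # Hypothetical helper: accept common spellings, reject everything else.
    value = value.strip().lower()
    if value in ("yes", "y", "true", "1"):
        return True
    if value in ("no", "n", "false", "0"):
        return False
    raise ValueError(f"Please enter yes or no, got {value!r}")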
'''simple docstring'''
def infix_2_postfix(infix):
    '''simple docstring'''
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7
    # Print table header for output
    print(
        "Symbol".center(8), "Stack".center(print_width), "Postfix".center(print_width), sep=" | ",
    )
    print("-" * (print_width * 3 + 7))
    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                # stop at "(" so the priority lookup never sees a parenthesis
                while len(stack) > 0 and stack[-1] != "(" and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8), ("".join(stack)).ljust(print_width), ("".join(post_fix)).ljust(print_width), sep=" | ",
        )  # Output in tabular format
    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8), ("".join(stack)).ljust(print_width), ("".join(post_fix)).ljust(print_width), sep=" | ",
        )  # Output in tabular format
    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix):
    '''simple docstring'''
    infix = list(infix[::-1])  # reverse the infix equation
    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("
    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix


if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
print('''\n\t''', Infix, '''(Infix) -> ''', infix_2_prefix(Infix), '''(Prefix)''')
| 24 | 0 |
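A worked example for the converters above; the expected strings are the standard results for this textbook expression (the tabular trace printed by infix_2_postfix is ignored here):

# a+b*(c^d-e) -> postfix "abcd^e-*+" -> prefix "+a*b-^cde"
assert infix_2_postfix("a+b*(c^d-e)") == "abcd^e-*+"
assert infix_2_prefix("a+b*(c^d-e)") == "+a*b-^cde"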
'''simple docstring'''
class a :
'''simple docstring'''
def __init__( self , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=None ) -> Dict:
_a : Optional[int] = data
_a : str = previous
_a : Optional[Any] = next_node
def __str__( self ) -> str:
return F'''{self.data}'''
def __UpperCamelCase ( self ) -> int:
return self.data
def __UpperCamelCase ( self ) -> Dict:
return self.next
def __UpperCamelCase ( self ) -> int:
return self.previous
class a :
'''simple docstring'''
def __init__( self , lowerCamelCase_ ) -> Tuple:
_a : Dict = head
def __iter__( self ) -> Any:
return self
def __UpperCamelCase ( self ) -> Tuple:
if not self.current:
raise StopIteration
else:
_a : List[str] = self.current.get_data()
_a : Tuple = self.current.get_next()
return value
class a :
'''simple docstring'''
def __init__( self ) -> Optional[Any]:
_a : Any = None # First node in list
_a : Optional[int] = None # Last node in list
def __str__( self ) -> Any:
_a : List[Any] = self.head
_a : Optional[Any] = []
while current is not None:
nodes.append(current.get_data() )
_a : List[str] = current.get_next()
return " ".join(str(__SCREAMING_SNAKE_CASE ) for node in nodes )
def __contains__( self , lowerCamelCase_ ) -> Optional[Any]:
_a : Any = self.head
while current:
if current.get_data() == value:
return True
_a : int = current.get_next()
return False
def __iter__( self ) -> Optional[Any]:
return LinkedListIterator(self.head )
def __UpperCamelCase ( self ) -> Union[str, Any]:
if self.head:
return self.head.get_data()
return None
def __UpperCamelCase ( self ) -> Tuple:
if self.tail:
return self.tail.get_data()
return None
def __UpperCamelCase ( self , lowerCamelCase_ ) -> None:
if self.head is None:
_a : int = node
_a : Any = node
else:
self.insert_before_node(self.head , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , lowerCamelCase_ ) -> None:
if self.head is None:
self.set_head(__SCREAMING_SNAKE_CASE )
else:
self.insert_after_node(self.tail , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , lowerCamelCase_ ) -> None:
_a : Union[str, Any] = Node(__SCREAMING_SNAKE_CASE )
if self.head is None:
self.set_head(__SCREAMING_SNAKE_CASE )
else:
self.set_tail(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , lowerCamelCase_ , lowerCamelCase_ ) -> None:
_a : Tuple = node
_a : int = node.previous
if node.get_previous() is None:
_a : Optional[int] = node_to_insert
else:
_a : Union[str, Any] = node_to_insert
_a : Dict = node_to_insert
def __UpperCamelCase ( self , lowerCamelCase_ , lowerCamelCase_ ) -> None:
_a : Any = node
_a : Any = node.next
if node.get_next() is None:
_a : int = node_to_insert
else:
_a : int = node_to_insert
_a : Optional[Any] = node_to_insert
def __UpperCamelCase ( self , lowerCamelCase_ , lowerCamelCase_ ) -> None:
_a : List[str] = 1
_a : Tuple = Node(__SCREAMING_SNAKE_CASE )
_a : Tuple = self.head
while node:
if current_position == position:
self.insert_before_node(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return
current_position += 1
_a : Dict = node.next
self.insert_after_node(self.tail , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , lowerCamelCase_ ) -> Node:
_a : str = self.head
while node:
if node.get_data() == item:
return node
_a : List[Any] = node.get_next()
raise Exception('Node not found' )
def __UpperCamelCase ( self , lowerCamelCase_ ) -> Union[str, Any]:
if (node := self.get_node(__SCREAMING_SNAKE_CASE )) is not None:
if node == self.head:
_a : List[str] = self.head.get_next()
if node == self.tail:
_a : Optional[Any] = self.tail.get_previous()
self.remove_node_pointers(__SCREAMING_SNAKE_CASE )
@staticmethod
def __UpperCamelCase ( lowerCamelCase_ ) -> None:
if node.get_next():
_a : int = node.previous
if node.get_previous():
_a : int = node.next
_a : Tuple = None
_a : str = None
def __UpperCamelCase ( self ) -> Tuple:
return self.head is None
def UpperCAmelCase_ ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 120 |
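The method names in the class above are obfuscated, so as a reference point here is a minimal self-contained sketch of the same insert-before pointer surgery. The names are illustrative, not the original's:

class DNode:
    def __init__(self, data):
        self.data, self.prev, self.next = data, None, None

def insert_before(head, node, new):
    # Splice `new` in front of `node`; return the (possibly new) head.
    new.next, new.prev = node, node.prev
    if node.prev is None:
        head = new  # inserting before the current head
    else:
        node.prev.next = new
    node.prev = new
    return head

head = DNode(2)
head = insert_before(head, head, DNode(1))
assert (head.data, head.next.data) == (1, 2)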
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase_ : int = logging.get_logger(__name__)
UpperCAmelCase_ : Dict = {
'''microsoft/swin-tiny-patch4-window7-224''': (
'''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase):
__lowercase : List[Any] = '''swin'''
__lowercase : str = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , __SCREAMING_SNAKE_CASE=224 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=96 , __SCREAMING_SNAKE_CASE=[2, 2, 6, 2] , __SCREAMING_SNAKE_CASE=[3, 6, 12, 24] , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=4.0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1E-5 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ) -> int:
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE )
__snake_case = image_size
__snake_case = patch_size
__snake_case = num_channels
__snake_case = embed_dim
__snake_case = depths
__snake_case = len(__SCREAMING_SNAKE_CASE )
__snake_case = num_heads
__snake_case = window_size
__snake_case = mlp_ratio
__snake_case = qkv_bias
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = drop_path_rate
__snake_case = hidden_act
__snake_case = use_absolute_embeddings
__snake_case = layer_norm_eps
__snake_case = initializer_range
__snake_case = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__snake_case = int(embed_dim * 2 ** (len(__SCREAMING_SNAKE_CASE ) - 1) )
__snake_case = ['''stem'''] + [F'''stage{idx}''' for idx in range(1 , len(__SCREAMING_SNAKE_CASE ) + 1 )]
__snake_case , __snake_case = get_aligned_output_features_output_indices(
out_features=__SCREAMING_SNAKE_CASE , out_indices=__SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : Optional[int] = version.parse('''1.11''')
@property
def lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowerCAmelCase ( self ) -> float:
'''simple docstring'''
return 1E-4
| 24 | 0 |
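One detail from the config above worth a worked check: the channel dimension after the last stage is embed_dim * 2 ** (len(depths) - 1), which for the default tiny configuration gives the familiar 768:

embed_dim, depths = 96, [2, 2, 6, 2]
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
assert hidden_size == 768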
def partition(m: int) -> int:
    '''simple docstring'''
    memo = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1
    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            n = int(input("Enter a number: ").strip())
            print(partition(n))
        except ValueError:
            print("Please enter a number.")
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print("Please pass a number.")
| 393 |
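A quick sanity check for the memoized count above. Hand-tracing the recurrence for m = 5 gives 7, i.e. the full partition number p(5); note that Project Euler 76 subtracts 1 from this to exclude the trivial single-part sum:

# p(5) = 7: 5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1
assert partition(5) == 7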
'''simple docstring'''
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t):
    '''simple docstring'''
    t = int(t)
    h, m, s = t // 36_00, (t // 60) % 60, t % 60
    return f'''{h}:{m:02d}:{s:02d}''' if h != 0 else f'''{m:02d}:{s:02d}'''
def html_progress_bar(value, total, prefix, label, width=3_00):
    '''simple docstring'''
return f'''
<div>
{prefix}
<progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>
{label}
</div>
'''
def text_to_html_table(items):
    '''simple docstring'''
    html_code = '''<table border="1" class="dataframe">\n'''
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += f''' <th>{i}</th>\n'''
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
elt = f'''{elt:.6f}''' if isinstance(elt , float ) else str(elt )
html_code += f''' <td>{elt}</td>\n'''
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class lowerCAmelCase :
__lowercase : str = 5
__lowercase : Optional[Any] = 0.2
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 300 , ) -> List[Any]:
'''simple docstring'''
__snake_case = total
__snake_case = '''''' if prefix is None else prefix
__snake_case = leave
__snake_case = parent
__snake_case = width
__snake_case = None
__snake_case = None
__snake_case = None
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None ) -> Any:
'''simple docstring'''
__snake_case = value
if comment is not None:
__snake_case = comment
if self.last_value is None:
__snake_case = __snake_case = time.time()
__snake_case = __snake_case = value
__snake_case = __snake_case = None
__snake_case = self.warmup
__snake_case = 1
self.update_bar(__SCREAMING_SNAKE_CASE )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
if self.first_calls > 0:
self.first_calls -= 1
__snake_case = time.time()
__snake_case = current_time - self.start_time
# We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
__snake_case = self.elapsed_time / (value - self.start_value)
else:
__snake_case = None
if value >= self.total:
__snake_case = self.total
__snake_case = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
__snake_case = self.average_time_per_item * (self.total - value)
self.update_bar(__SCREAMING_SNAKE_CASE )
__snake_case = value
__snake_case = current_time
if self.average_time_per_item is None:
__snake_case = 1
else:
__snake_case = max(int(self.update_every / self.average_time_per_item ) , 1 )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) -> List[str]:
'''simple docstring'''
__snake_case = ''' ''' * (len(str(self.total ) ) - len(str(__SCREAMING_SNAKE_CASE ) )) + str(__SCREAMING_SNAKE_CASE )
if self.elapsed_time is None:
__snake_case = F'''[{spaced_value}/{self.total} : < :'''
elif self.predicted_remaining is None:
__snake_case = F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )}'''
else:
__snake_case = (
F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <'''
F''' {format_time(self.predicted_remaining )}'''
)
self.label += F''', {1/self.average_time_per_item:.2f} it/s'''
self.label += "]" if self.comment is None or len(self.comment ) == 0 else F''', {self.comment}]'''
self.display()
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
__snake_case = disp.display(disp.HTML(self.html_code ) , display_id=__SCREAMING_SNAKE_CASE )
else:
self.output.update(disp.HTML(self.html_code ) )
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
if self.parent is None and self.output is not None:
self.output.update(disp.HTML('''''' ) )
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) -> Any:
'''simple docstring'''
super().__init__(__SCREAMING_SNAKE_CASE )
__snake_case = None if column_names is None else [column_names]
__snake_case = None
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
__snake_case = disp.display(disp.HTML(self.html_code ) , display_id=__SCREAMING_SNAKE_CASE )
else:
self.output.update(disp.HTML(self.html_code ) )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
if self.inner_table is None:
__snake_case = [list(values.keys() ), list(values.values() )]
else:
__snake_case = self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(__SCREAMING_SNAKE_CASE )
__snake_case = columns
self.inner_table.append([values[c] for c in columns] )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=300 ) -> List[str]:
'''simple docstring'''
__snake_case = NotebookProgressBar(__SCREAMING_SNAKE_CASE , prefix=__SCREAMING_SNAKE_CASE , parent=self , width=__SCREAMING_SNAKE_CASE )
return self.child_bar
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = None
self.display()
class lowerCAmelCase ( __lowerCAmelCase):
def __init__( self ) -> str:
'''simple docstring'''
__snake_case = None
__snake_case = None
__snake_case = False
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
__snake_case = '''Epoch''' if args.evaluation_strategy == IntervalStrategy.EPOCH else '''Step'''
__snake_case = 0
__snake_case = 0
__snake_case = [self.first_column] + ['''Training Loss''']
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append('''Validation Loss''' )
__snake_case = NotebookTrainingTracker(state.max_steps , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
__snake_case = int(state.epoch ) if int(state.epoch ) == state.epoch else F'''{state.epoch:.2f}'''
self.training_tracker.update(
state.global_step + 1 , comment=F'''Epoch {epoch}/{state.num_train_epochs}''' , force_update=self._force_next_update , )
__snake_case = False
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
if not has_length(__SCREAMING_SNAKE_CASE ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
__snake_case = self.training_tracker.add_child(len(__SCREAMING_SNAKE_CASE ) )
else:
__snake_case = NotebookProgressBar(len(__SCREAMING_SNAKE_CASE ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
if self.prediction_bar is not None:
self.prediction_bar.close()
__snake_case = None
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
__snake_case = {'''Training Loss''': logs['''loss''']}
# First column is necessarily Step since we're not in epoch eval strategy
__snake_case = state.global_step
self.training_tracker.write_line(__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
if self.training_tracker is not None:
__snake_case = {'''Training Loss''': '''No log''', '''Validation Loss''': '''No log'''}
for log in reversed(state.log_history ):
if "loss" in log:
__snake_case = log['''loss''']
break
if self.first_column == "Epoch":
__snake_case = int(state.epoch )
else:
__snake_case = state.global_step
__snake_case = '''eval'''
for k in metrics:
if k.endswith('''_loss''' ):
__snake_case = re.sub(r'''\_loss$''' , '''''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop('''total_flos''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop('''epoch''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop(F'''{metric_key_prefix}_runtime''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop(F'''{metric_key_prefix}_samples_per_second''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop(F'''{metric_key_prefix}_steps_per_second''' , __SCREAMING_SNAKE_CASE )
__snake_case = metrics.pop(F'''{metric_key_prefix}_jit_compilation_time''' , __SCREAMING_SNAKE_CASE )
for k, v in metrics.items():
if k == F'''{metric_key_prefix}_loss''':
__snake_case = v
else:
__snake_case = k.split('''_''' )
__snake_case = ''' '''.join([part.capitalize() for part in splits[1:]] )
__snake_case = v
self.training_tracker.write_line(__SCREAMING_SNAKE_CASE )
self.training_tracker.remove_child()
__snake_case = None
# Evaluation takes a long time so we should force the next update.
__snake_case = True
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
self.training_tracker.update(
state.global_step , comment=F'''Epoch {int(state.epoch )}/{state.num_train_epochs}''' , force_update=__SCREAMING_SNAKE_CASE )
__snake_case = None
| 24 | 0 |
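Two quick checks for the format_time helper above (after the name fixes): hours are dropped when zero, and minutes and seconds are zero-padded:

assert format_time(3723) == "1:02:03"  # 1 h 2 min 3 s
assert format_time(59) == "00:59"      # under a minute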
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase_ = {
'''configuration_roberta''': ['''ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RobertaConfig''', '''RobertaOnnxConfig'''],
'''tokenization_roberta''': ['''RobertaTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ['''RobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'''ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RobertaForCausalLM''',
'''RobertaForMaskedLM''',
'''RobertaForMultipleChoice''',
'''RobertaForQuestionAnswering''',
'''RobertaForSequenceClassification''',
'''RobertaForTokenClassification''',
'''RobertaModel''',
'''RobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'''TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRobertaForCausalLM''',
'''TFRobertaForMaskedLM''',
'''TFRobertaForMultipleChoice''',
'''TFRobertaForQuestionAnswering''',
'''TFRobertaForSequenceClassification''',
'''TFRobertaForTokenClassification''',
'''TFRobertaMainLayer''',
'''TFRobertaModel''',
'''TFRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'''FlaxRobertaForCausalLM''',
'''FlaxRobertaForMaskedLM''',
'''FlaxRobertaForMultipleChoice''',
'''FlaxRobertaForQuestionAnswering''',
'''FlaxRobertaForSequenceClassification''',
'''FlaxRobertaForTokenClassification''',
'''FlaxRobertaModel''',
'''FlaxRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 552 |
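A minimal sketch of the lazy-import idea the module above uses. This illustrates the mechanism only; transformers' actual _LazyModule is more featureful (it also handles dir(), pickling, and error messages):

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        # Only runs when the attribute is not already set: import on demand.
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f".{self._attr_to_module[attr]}", self.__name__)
        return getattr(module, attr)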
'''simple docstring'''
def least_divisible_repunit(divisor: int) -> int:
    '''simple docstring'''
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1_00_00_00) -> int:
    '''simple docstring'''
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(F"""{solution() = }""")
| 24 | 0 |
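Worked checks for the repunit helper above: A(7) = 6 because 111111 = 7 × 15873, and A(11) = 2 because 11 divides the two-digit repunit 11:

assert least_divisible_repunit(7) == 6
assert least_divisible_repunit(11) == 2
assert least_divisible_repunit(10) == 0  # numbers sharing a factor with 10 never divide a repunit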
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class __lowercase ( __lowerCAmelCase ):
"""simple docstring"""
UpperCamelCase : jnp.ndarray
@flax_register_to_config
class __lowercase ( nn.Module , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
UpperCamelCase : int = 3_2
UpperCamelCase : int = 4
UpperCamelCase : int = 4
UpperCamelCase : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
UpperCamelCase : Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
UpperCamelCase : Union[bool, Tuple[bool]] = False
UpperCamelCase : Tuple[int] = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0)
UpperCamelCase : int = 2
UpperCamelCase : Union[int, Tuple[int]] = 8
UpperCamelCase : Optional[Union[int, Tuple[int]]] = None
UpperCamelCase : int = 1_2_8_0
UpperCamelCase : float = 0.0
UpperCamelCase : bool = False
UpperCamelCase : jnp.dtype = jnp.floataa
UpperCamelCase : bool = True
UpperCamelCase : int = 0
UpperCamelCase : bool = False
def __A ( self , A ) -> FrozenDict:
'''simple docstring'''
lowerCamelCase = (1, self.in_channels, self.sample_size, self.sample_size)
lowerCamelCase = jnp.zeros(__SCREAMING_SNAKE_CASE , dtype=jnp.floataa )
lowerCamelCase = jnp.ones((1,) , dtype=jnp.intaa )
lowerCamelCase = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
lowerCamelCase , lowerCamelCase = jax.random.split(__SCREAMING_SNAKE_CASE )
lowerCamelCase = {"""params""": params_rng, """dropout""": dropout_rng}
return self.init(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )["params"]
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase = self.block_out_channels
lowerCamelCase = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
"""At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.""" )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
lowerCamelCase = self.num_attention_heads or self.attention_head_dim
# input
lowerCamelCase = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
lowerCamelCase = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
lowerCamelCase = FlaxTimestepEmbedding(__SCREAMING_SNAKE_CASE , dtype=self.dtype )
lowerCamelCase = self.only_cross_attention
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowerCamelCase = (only_cross_attention,) * len(self.down_block_types )
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowerCamelCase = (num_attention_heads,) * len(self.down_block_types )
# down
lowerCamelCase = []
lowerCamelCase = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
lowerCamelCase = output_channel
lowerCamelCase = block_out_channels[i]
lowerCamelCase = i == len(__SCREAMING_SNAKE_CASE ) - 1
if down_block_type == "CrossAttnDownBlock2D":
lowerCamelCase = FlaxCrossAttnDownBlockaD(
in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
lowerCamelCase = FlaxDownBlockaD(
in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(__SCREAMING_SNAKE_CASE )
lowerCamelCase = down_blocks
# mid
lowerCamelCase = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
lowerCamelCase = []
lowerCamelCase = list(reversed(__SCREAMING_SNAKE_CASE ) )
lowerCamelCase = list(reversed(__SCREAMING_SNAKE_CASE ) )
lowerCamelCase = list(reversed(__SCREAMING_SNAKE_CASE ) )
lowerCamelCase = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
lowerCamelCase = output_channel
lowerCamelCase = reversed_block_out_channels[i]
lowerCamelCase = reversed_block_out_channels[min(i + 1 , len(__SCREAMING_SNAKE_CASE ) - 1 )]
lowerCamelCase = i == len(__SCREAMING_SNAKE_CASE ) - 1
if up_block_type == "CrossAttnUpBlock2D":
lowerCamelCase = FlaxCrossAttnUpBlockaD(
in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , prev_output_channel=__SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
lowerCamelCase = FlaxUpBlockaD(
in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , prev_output_channel=__SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(__SCREAMING_SNAKE_CASE )
lowerCamelCase = output_channel
lowerCamelCase = up_blocks
# out
lowerCamelCase = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
lowerCamelCase = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , A , A , A , A=None , A=None , A = True , A = False , ) -> Union[FlaxUNetaDConditionOutput, Tuple]:
'''simple docstring'''
if not isinstance(__SCREAMING_SNAKE_CASE , jnp.ndarray ):
lowerCamelCase = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(__SCREAMING_SNAKE_CASE , jnp.ndarray ) and len(timesteps.shape ) == 0:
lowerCamelCase = timesteps.astype(dtype=jnp.floataa )
lowerCamelCase = jnp.expand_dims(__SCREAMING_SNAKE_CASE , 0 )
lowerCamelCase = self.time_proj(__SCREAMING_SNAKE_CASE )
lowerCamelCase = self.time_embedding(__SCREAMING_SNAKE_CASE )
# 2. pre-process
lowerCamelCase = jnp.transpose(__SCREAMING_SNAKE_CASE , (0, 2, 3, 1) )
lowerCamelCase = self.conv_in(__SCREAMING_SNAKE_CASE )
# 3. down
lowerCamelCase = (sample,)
for down_block in self.down_blocks:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowerCamelCase , lowerCamelCase = down_block(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , deterministic=not train )
else:
lowerCamelCase , lowerCamelCase = down_block(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
lowerCamelCase = ()
for down_block_res_sample, down_block_additional_residual in zip(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
lowerCamelCase = new_down_block_res_samples
# 4. mid
lowerCamelCase = self.mid_block(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
lowerCamelCase = down_block_res_samples[-(self.layers_per_block + 1) :]
lowerCamelCase = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowerCamelCase = up_block(
__SCREAMING_SNAKE_CASE , temb=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , res_hidden_states_tuple=__SCREAMING_SNAKE_CASE , deterministic=not train , )
else:
lowerCamelCase = up_block(__SCREAMING_SNAKE_CASE , temb=__SCREAMING_SNAKE_CASE , res_hidden_states_tuple=__SCREAMING_SNAKE_CASE , deterministic=not train )
# 6. post-process
lowerCamelCase = self.conv_norm_out(__SCREAMING_SNAKE_CASE )
lowerCamelCase = nn.silu(__SCREAMING_SNAKE_CASE )
lowerCamelCase = self.conv_out(__SCREAMING_SNAKE_CASE )
lowerCamelCase = jnp.transpose(__SCREAMING_SNAKE_CASE , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=__SCREAMING_SNAKE_CASE )
| 457 |
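The timestep handling at the top of __call__ above promotes a Python int or 0-d array to a 1-d batch before the sinusoidal projection; a standalone illustration of that shape promotion (assuming jax is installed):

import jax.numpy as jnp

t = jnp.array(10)  # 0-d, as a bare int timestep would arrive
t = jnp.expand_dims(t.astype(jnp.float32), 0)
assert t.shape == (1,)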
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
'''simple docstring'''
__snake_case = []
for part_id in partition_order:
__snake_case = df.where(f'''SPARK_PARTITION_ID() = {part_id}''' ).collect()
for row_idx, row in enumerate(_lowerCamelCase ):
expected_row_ids_and_row_dicts.append((f'''{part_id}_{row_idx}''', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Any:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(1_00 ).repartition(1 )
__snake_case = Spark(_lowerCamelCase )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Tuple:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(10 ).repartition(2 )
__snake_case = [1, 0]
__snake_case = _generate_iterable_examples(_lowerCamelCase , _lowerCamelCase ) # Reverse the partitions.
__snake_case = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , _lowerCamelCase )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
__snake_case , __snake_case = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> int:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(10 ).repartition(1 )
__snake_case = SparkExamplesIterable(_lowerCamelCase )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
assert row_id == f'''0_{i}'''
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Union[str, Any]:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('''numpy.random.Generator''' ) as generator_mock:
__snake_case = lambda _lowerCamelCase : x.reverse()
__snake_case = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , [2, 1, 0] )
__snake_case = SparkExamplesIterable(_lowerCamelCase ).shuffle_data_sources(_lowerCamelCase )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
__snake_case , __snake_case = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Tuple:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
__snake_case = SparkExamplesIterable(_lowerCamelCase ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
__snake_case = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , [0, 2] )
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
__snake_case , __snake_case = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
__snake_case = SparkExamplesIterable(_lowerCamelCase ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
__snake_case = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCamelCase , [1, 3] )
for i, (row_id, row_dict) in enumerate(_lowerCamelCase ):
__snake_case , __snake_case = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _UpperCamelCase ()-> Optional[int]:
'''simple docstring'''
__snake_case = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
__snake_case = spark.range(1_00 ).repartition(1 )
__snake_case = Spark(_lowerCamelCase )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 1_00
| 24 | 0 |
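A runnable sketch (assuming a local pyspark installation) of the SPARK_PARTITION_ID trick the helper above uses to read a DataFrame back partition by partition:

import pyspark

spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("demo").getOrCreate()
df = spark.range(10).repartition(2)
for part_id in range(df.rdd.getNumPartitions()):
    rows = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
    print(part_id, [row.asDict() for row in rows])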
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowerCamelCase__ ( __lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase__ = UnCLIPImageVariationPipeline
UpperCamelCase__ = IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''guidance_scale'''}
UpperCamelCase__ = IMAGE_VARIATION_BATCH_PARAMS
UpperCamelCase__ = [
'''generator''',
'''return_dict''',
'''decoder_num_inference_steps''',
'''super_res_num_inference_steps''',
]
UpperCamelCase__ = False
@property
def lowerCAmelCase_ ( self : Tuple ):
return 32
@property
def lowerCAmelCase_ ( self : List[str] ):
return 32
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
return self.time_input_dim
@property
def lowerCAmelCase_ ( self : List[Any] ):
return self.time_input_dim * 4
@property
def lowerCAmelCase_ ( self : Tuple ):
return 1_00
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
a__ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def lowerCAmelCase_ ( self : str ):
torch.manual_seed(0 )
a__ = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=self.text_embedder_hidden_size ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,)
return CLIPTextModelWithProjection(__SCREAMING_SNAKE_CASE )
@property
def lowerCAmelCase_ ( self : int ):
torch.manual_seed(0 )
a__ = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size ,projection_dim=self.text_embedder_hidden_size ,num_hidden_layers=5 ,num_attention_heads=4 ,image_size=32 ,intermediate_size=37 ,patch_size=1 ,)
return CLIPVisionModelWithProjection(__SCREAMING_SNAKE_CASE )
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
torch.manual_seed(0 )
a__ = {
"clip_embeddings_dim": self.text_embedder_hidden_size,
"time_embed_dim": self.time_embed_dim,
"cross_attention_dim": self.cross_attention_dim,
}
a__ = UnCLIPTextProjModel(**__SCREAMING_SNAKE_CASE )
return model
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
torch.manual_seed(0 )
a__ = {
"sample_size": 32,
# RGB in channels
"in_channels": 3,
# Out channels is double in channels because predicts mean and variance
"out_channels": 6,
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": "identity",
}
a__ = UNetaDConditionModel(**__SCREAMING_SNAKE_CASE )
return model
@property
def lowerCAmelCase_ ( self : str ):
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def lowerCAmelCase_ ( self : Optional[int] ):
torch.manual_seed(0 )
a__ = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def lowerCAmelCase_ ( self : List[str] ):
torch.manual_seed(1 )
a__ = UNetaDModel(**self.dummy_super_res_kwargs )
return model
def lowerCAmelCase_ ( self : Tuple ):
a__ = self.dummy_decoder
a__ = self.dummy_text_proj
a__ = self.dummy_text_encoder
a__ = self.dummy_tokenizer
a__ = self.dummy_super_res_first
a__ = self.dummy_super_res_last
a__ = UnCLIPScheduler(
variance_type="learned_range" ,prediction_type="epsilon" ,num_train_timesteps=10_00 ,)
a__ = UnCLIPScheduler(
variance_type="fixed_small_log" ,prediction_type="epsilon" ,num_train_timesteps=10_00 ,)
a__ = CLIPImageProcessor(crop_size=32 ,size=32 )
a__ = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def lowerCAmelCase_ ( self : Any ,a__ : Dict ,a__ : Optional[Any]=0 ,a__ : Optional[Any]=True ):
a__ = floats_tensor((1, 3, 32, 32) ,rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
if str(__SCREAMING_SNAKE_CASE ).startswith("mps" ):
a__ = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
a__ = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
if pil_image:
a__ = input_image * 0.5 + 0.5
a__ = input_image.clamp(0 ,1 )
a__ = input_image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
a__ = DiffusionPipeline.numpy_to_pil(__SCREAMING_SNAKE_CASE )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def lowerCAmelCase_ ( self : Tuple ):
a__ = "cpu"
a__ = self.get_dummy_components()
a__ = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
a__ = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a__ = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ,pil_image=__SCREAMING_SNAKE_CASE )
a__ = pipe(**__SCREAMING_SNAKE_CASE )
a__ = output.images
a__ = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ,pil_image=__SCREAMING_SNAKE_CASE )
a__ = pipe(
**__SCREAMING_SNAKE_CASE ,return_dict=__SCREAMING_SNAKE_CASE ,)[0]
a__ = image[0, -3:, -3:, -1]
a__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
a__ = np.array(
[
0.9997,
0.0002,
0.9997,
0.9997,
0.9969,
0.0023,
0.9997,
0.9969,
0.9970,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCAmelCase_ ( self : Optional[int] ):
a__ = "cpu"
a__ = self.get_dummy_components()
a__ = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
a__ = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a__ = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ,pil_image=__SCREAMING_SNAKE_CASE )
a__ = pipe(**__SCREAMING_SNAKE_CASE )
a__ = output.images
a__ = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ,pil_image=__SCREAMING_SNAKE_CASE )
a__ = pipe(
**__SCREAMING_SNAKE_CASE ,return_dict=__SCREAMING_SNAKE_CASE ,)[0]
a__ = image[0, -3:, -3:, -1]
a__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
a__ = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCAmelCase_ ( self : Optional[int] ):
a__ = "cpu"
a__ = self.get_dummy_components()
a__ = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
a__ = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a__ = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ,pil_image=__SCREAMING_SNAKE_CASE )
a__ = [
pipeline_inputs["image"],
pipeline_inputs["image"],
]
a__ = pipe(**__SCREAMING_SNAKE_CASE )
a__ = output.images
a__ = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ,pil_image=__SCREAMING_SNAKE_CASE )
a__ = [
tuple_pipeline_inputs["image"],
tuple_pipeline_inputs["image"],
]
a__ = pipe(
**__SCREAMING_SNAKE_CASE ,return_dict=__SCREAMING_SNAKE_CASE ,)[0]
a__ = image[0, -3:, -3:, -1]
a__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
a__ = np.array(
[
0.9997,
0.9989,
0.0008,
0.0021,
0.9960,
0.0018,
0.0014,
0.0002,
0.9933,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCAmelCase_ ( self : Union[str, Any] ):
a__ = torch.device("cpu" )
class lowerCamelCase__ :
"""simple docstring"""
UpperCamelCase__ = 1
a__ = self.get_dummy_components()
a__ = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
a__ = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a__ = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(0 )
a__ = pipe.decoder.dtype
a__ = 1
a__ = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
a__ = pipe.prepare_latents(
__SCREAMING_SNAKE_CASE ,dtype=__SCREAMING_SNAKE_CASE ,device=__SCREAMING_SNAKE_CASE ,generator=__SCREAMING_SNAKE_CASE ,latents=__SCREAMING_SNAKE_CASE ,scheduler=DummyScheduler() )
a__ = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
a__ = pipe.prepare_latents(
__SCREAMING_SNAKE_CASE ,dtype=__SCREAMING_SNAKE_CASE ,device=__SCREAMING_SNAKE_CASE ,generator=__SCREAMING_SNAKE_CASE ,latents=__SCREAMING_SNAKE_CASE ,scheduler=DummyScheduler() )
a__ = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ,pil_image=__SCREAMING_SNAKE_CASE )
a__ = pipe(
**__SCREAMING_SNAKE_CASE ,decoder_latents=__SCREAMING_SNAKE_CASE ,super_res_latents=__SCREAMING_SNAKE_CASE ).images
a__ = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ,pil_image=__SCREAMING_SNAKE_CASE )
# Don't pass image, instead pass embedding
a__ = pipeline_inputs.pop("image" )
a__ = pipe.image_encoder(__SCREAMING_SNAKE_CASE ).image_embeds
a__ = pipe(
**__SCREAMING_SNAKE_CASE ,decoder_latents=__SCREAMING_SNAKE_CASE ,super_res_latents=__SCREAMING_SNAKE_CASE ,image_embeddings=__SCREAMING_SNAKE_CASE ,).images
# make sure passing text embeddings manually is identical
assert np.abs(img_out_a - img_out_a ).max() < 1e-4
@skip_mps
def lowerCAmelCase_ ( self : Optional[int] ):
a__ = torch_device == "cpu"
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
a__ = 1e-2
self._test_attention_slicing_forward_pass(
test_max_difference=__SCREAMING_SNAKE_CASE ,expected_max_diff=__SCREAMING_SNAKE_CASE )
@skip_mps
def lowerCAmelCase_ ( self : Tuple ):
a__ = torch_device == "cpu"
a__ = True
a__ = [
"decoder_num_inference_steps",
"super_res_num_inference_steps",
]
self._test_inference_batch_single_identical(
test_max_difference=__SCREAMING_SNAKE_CASE ,relax_max_difference=__SCREAMING_SNAKE_CASE ,additional_params_copy_to_batched_inputs=__SCREAMING_SNAKE_CASE ,)
def lowerCAmelCase_ ( self : Optional[int] ):
a__ = [
"decoder_num_inference_steps",
"super_res_num_inference_steps",
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
a__ = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=__SCREAMING_SNAKE_CASE ,additional_params_copy_to_batched_inputs=__SCREAMING_SNAKE_CASE ,)
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=__SCREAMING_SNAKE_CASE )
@skip_mps
def lowerCAmelCase_ ( self : str ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def lowerCAmelCase_ ( self : Any ):
return super().test_save_load_local()
@skip_mps
def lowerCAmelCase_ ( self : List[Any] ):
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase_ ( self : List[str] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self : Union[str, Any] ):
a__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png" )
a__ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/unclip/karlo_v1_alpha_cat_variation_fp16.npy" )
a__ = UnCLIPImageVariationPipeline.from_pretrained(
"kakaobrain/karlo-v1-alpha-image-variations" ,torch_dtype=torch.floataa )
a__ = pipeline.to(__SCREAMING_SNAKE_CASE )
pipeline.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a__ = torch.Generator(device="cpu" ).manual_seed(0 )
a__ = pipeline(
__SCREAMING_SNAKE_CASE ,generator=__SCREAMING_SNAKE_CASE ,output_type="np" ,)
a__ = output.images[0]
assert image.shape == (2_56, 2_56, 3)
assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,15 )
| 331 |
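The hard-coded expected_slice comparisons above all follow one pattern: take the 3x3 corner of the first image's last channel and compare against reference values within a tolerance. A standalone version of that check:

import numpy as np

image = np.random.RandomState(0).rand(1, 64, 64, 3)
image_slice = image[0, -3:, -3:, -1]
assert image_slice.shape == (3, 3)
expected_slice = image_slice.copy()  # stand-in for the stored reference values
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2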
'''simple docstring'''
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    '''simple docstring'''
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total


def main() -> None:
    '''simple docstring'''
    print(sum_of_series(1, 1, 10))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 24 | 0 |
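A worked check of the arithmetic-series formula above, S = n/2 * (2a + (n - 1)d): with a = 1, d = 1, n = 10 this is 5 * 11 = 55:

assert sum_of_series(1, 1, 10) == 55.0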
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
UpperCamelCase__ = sys.version_info >= (3, 1_0)
def list_field(default=None, metadata=None):
    # A mutable default (a list) must go through default_factory on a dataclass field.
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: "int"
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
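# A minimal usage sketch (illustrative only): HfArgumentParser maps each dataclass
# field to an argparse argument, so something like
#
#     parser = HfArgumentParser(BasicExample)
#     (example,) = parser.parse_args_into_dataclasses(
#         ["--foo", "1", "--bar", "0.5", "--baz", "quux", "--flag", "true"]
#     )
#
# yields BasicExample(foo=1, bar=0.5, baz="quux", flag=True), which is exactly what
# the tests below assert piece by piece.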
class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        """
        Small helper to check pseudo-equality of parsed arguments on `ArgumentParser` instances.
        """
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}

            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]

            self.assertEqual(xx, yy)
    def test_basic(self):
        parser = HfArgumentParser(BasicExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)

        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)
    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=42, type=int)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        self.argparsersEqual(parser, expected)
    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))

            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))

            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))

            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))

            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))
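    # Illustrative aside (not from the original file): with string_to_bool a flag
    # accepts several spellings on the command line, e.g.
    #
    #     --foo            -> foo=True   (bare flag, via const=True)
    #     --foo True       -> foo=True
    #     --no_baz         -> baz=False  (the generated negative counterpart)
    #
    # which is exactly the matrix the assertions above walk through.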
    def test_with_enum(self):
        parser = HfArgumentParser(MixedEnumExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=["titi", "toto", 42],
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)
    def test_with_literal(self):
        @dataclass
        class LiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(LiteralExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=("titi", "toto", 42),
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
    def test_with_list(self):
        parser = HfArgumentParser(ListExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo_int", nargs="+", default=[], type=int)
        expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int)
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=float)
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(
            args,
            Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]),
        )

        args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7]))
    def test_with_optional(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=None, type=int)
        expected.add_argument("--bar", default=None, type=float, help="help message")
        expected.add_argument("--baz", default=None, type=str)
        expected.add_argument("--ces", nargs="+", default=[], type=str)
        expected.add_argument("--des", nargs="+", default=[], type=int)

        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))

            args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3]))
    def test_with_required(self):
        parser = HfArgumentParser(RequiredExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--required_list", nargs="+", type=int, required=True)
        expected.add_argument("--required_str", type=str, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        self.argparsersEqual(parser, expected)
    def test_with_string_literal_annotation(self):
        parser = HfArgumentParser(StringLiteralAnnotationExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        expected.add_argument("--opt", type=string_to_bool, default=None)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        self.argparsersEqual(parser, expected)
    def test_parse_dict(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }

        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)
    def test_parse_dict_extra_key(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }

        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)
    def test_parse_json(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_json = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_json")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".json", "w+") as f:
                json.dump(args_dict_for_json, f)
            # parse the file through the JSON entry point (a JSON file is also valid YAML)
            parsed_args = parser.parse_json_file(Path(temp_local_path + ".json"))[0]

        args = BasicExample(**args_dict_for_json)
        self.assertEqual(parsed_args, args)
    def test_parse_yaml(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_yaml = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_yaml")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".yaml", "w+") as f:
                yaml.dump(args_dict_for_yaml, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0]

        args = BasicExample(**args_dict_for_yaml)
        self.assertEqual(parsed_args, args)
    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
| 75 |
'''simple docstring'''
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f'''If set, {key} must be yes or no.''')
    return _value
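# Illustrative use (assumed invocation, not from the original file): gate expensive
# tests behind an environment flag, e.g.
#
#     RUN_SLOW=yes python -m pytest tests/
#
# which makes `parse_flag_from_env("RUN_SLOW", default=False)` below return True.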
_run_slow_tests = parse_flag_from_env('''RUN_SLOW''', default=False)
_run_remote_tests = parse_flag_from_env('''RUN_REMOTE''', default=False)
_run_local_tests = parse_flag_from_env('''RUN_LOCAL''', default=True)
_run_packaged_tests = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
    reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
    reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse('''0.3.2'''),
    reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == '''win32''',
    reason='''test should not be run on Windows''',
)
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip('''test requires faiss''')(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip('''test requires regex''')(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip('''test requires elasticsearch''')(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip('''test requires sqlalchemy''')(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip('''test requires PyTorch''')(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip('''test requires TensorFlow''')(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip('''test requires JAX''')(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip('''test requires Pillow''')(test_case)
    return test_case


def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip('''test requires transformers''')(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip('''test requires tiktoken''')(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip('''test requires spacy''')(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip('''test requires spacy''')(test_case)
        except OSError:
            return unittest.skip('''test requires spacy model \'{}\''''.format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip('''test requires pyspark''')(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip('''test requires joblibspark''')(test_case)
    else:
        return test_case


def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip('''test is slow''')(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip('''test is local''')(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip('''test is packaged''')(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip('''test requires remote''')(test_case)
    return test_case
def for_all_test_methods(*decorators):
    def decorate(cls):
        # apply every given decorator to every `test*` method of the class
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith('''test'''):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
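# Illustrative (assumed) usage sketch: stack several markers onto every test method
# of a class at once.
#
#     @for_all_test_methods(slow, require_torch)
#     class BigModelTests(unittest.TestCase):
#         def test_forward(self):
#             ...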
class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = '''https://10.255.255.1'''
        if kwargs.get('''timeout''') is None:
            raise RequestWouldHangIndefinitelyError(
                f'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.''')
        kwargs['''timeout'''] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace('''10.255.255.1''', f'''OfflineMock[{url}]'''),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError('''Offline mode is enabled.''', request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch('''requests.Session.send''', raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch('''requests.Session.request''', timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch('''datasets.config.HF_DATASETS_OFFLINE''', True):
            yield
    else:
        raise ValueError('''Please use a value from the OfflineSimulationMode enum.''')
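# Illustrative usage sketch (assumed call site, not from the original file):
# simulate a network outage inside a test.
#
#     with offline(OfflineSimulationMode.CONNECTION_FAILS):
#         load_dataset("squad")  # now raises ConnectionError instead of calling the Hub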
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
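# Illustrative usage (assumed): assert that building a table really allocates
# Arrow memory.
#
#     with assert_arrow_memory_increases():
#         table = pa.table({"col": list(range(1_000_000))})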
def is_rng_equal(rng1, rng2):
    # Compare two NumPy bit generators by drawing the same sample from deep copies.
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()


def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith('''500''') or str(err).startswith('''502'''):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print('''\nRunning: ''', ''' '''.join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env, )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode('''utf-8''').rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label='''stdout:''')),
            _read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label='''stderr:''')),
        ], timeout=timeout, )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=1_80, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))

    cmd_str = ''' '''.join(cmd)
    if result.returncode > 0:
        stderr = '''\n'''.join(result.stderr)
        raise RuntimeError(
            f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
            f'''The combined stderr from workers follows:\n{stderr}''')

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f'''\'{cmd_str}\' produced no output.''')

    return result
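# Illustrative call (assumed, not from the original file): run a child process and
# inspect its captured output line by line.
#
#     result = execute_subprocess_async(["python", "-c", "print('ok')"], env=dict(os.environ))
#     assert result.stdout[0] == "ok"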
def pytest_xdist_worker_id():
    """
    Return the numerical id of the current pytest-xdist worker ("gw0" -> 0),
    or 0 when pytest-xdist is not in use.
    """
    worker = os.environ.get('''PYTEST_XDIST_WORKER''', '''gw0''')
    worker = re.sub(R'''^gw''', '''''', worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    """
    Derive a unique port per pytest-xdist worker so that concurrent
    distributed tests don't try to bind the same port.
    """
    port = 2_95_00
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
| 24 | 0 |
def depth_first_search(grid, row, col, visit):
    """
    Count the unique paths from the top-left to the bottom-right of `grid`,
    moving in the four cardinal directions, never stepping on a 1 and never
    revisiting a cell (classic backtracking depth-first search).

    >>> grid = [[0, 0, 0, 0], [1, 1, 0, 0], [0, 0, 0, 1], [0, 1, 0, 0]]
    >>> depth_first_search(grid, 0, 0, set())
    2
    """
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
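# Worked example (illustrative): a 2x2 maze whose top-right cell is blocked has a
# single open path, down then right.
#
#     maze = [[0, 1],
#             [0, 0]]
#     print(depth_first_search(maze, 0, 0, set()))  # -> 1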
| 62 |
'''simple docstring'''
def partition(m: int) -> int:
    """Count the integer partitions of `m` (the number of ways to write `m` as a
    sum of positive integers) with a bottom-up dynamic programming table."""
    memo = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            # partitions using parts up to k, plus partitions that include a part of size k
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]
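# Worked example (illustrative): partition(4) == 5, counting
# 4, 3+1, 2+2, 2+1+1 and 1+1+1+1.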
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
UpperCAmelCase_ : List[str] = int(input('''Enter a number: ''').strip())
print(partition(n))
except ValueError:
print('''Please enter a number.''')
else:
try:
UpperCAmelCase_ : Union[str, Any] = int(sys.argv[1])
print(partition(n))
except ValueError:
print('''Please pass a number.''')
| 24 | 0 |