| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 86–54.5k) | int64 (0–371) | string (lengths 87–49.2k) | int64 (0–349) | int64 (0–1) |
import warnings

from ...utils import logging
from .image_processing_donut import DonutImageProcessor


logger = logging.get_logger(__name__)


class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
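

# Usage sketch: code importing the deprecated name keeps working but triggers the
# warning above; new code should use DonutImageProcessor directly. The checkpoint
# name below is an assumption for illustration.
if __name__ == "__main__":
    from transformers import DonutImageProcessor

    image_processor = DonutImageProcessor.from_pretrained("naver-clova-ix/donut-base")
    print(type(image_processor).__name__)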
| 138 |
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Get the last completed workflow run id of the scheduled (daily) CI."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Get the artifacts of the last completed workflow run of the scheduled (daily) CI."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # the misspelled keyword `worflow_run_id` matches the signature of the imported helper
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Get the artifacts' content of the last completed workflow run of the scheduled (daily) CI."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
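

# Usage sketch: the helpers above chain together to fetch the latest scheduled-CI
# reports. The artifact name and output directory are assumptions for illustration.
if __name__ == "__main__":
    reports = get_last_daily_ci_reports(
        artifact_names=["ci_results_run_models_gpu"],  # assumed artifact name
        output_dir="ci_artifacts",
        token=os.environ.get("GITHUB_TOKEN"),
    )
    for artifact_name, files in reports.items():
        print(artifact_name, sorted(files))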
| 154 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase_ = {
'configuration_mask2former': [
'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Mask2FormerConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['Mask2FormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'Mask2FormerForUniversalSegmentation',
'Mask2FormerModel',
'Mask2FormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
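

# Usage sketch (standalone): with `_LazyModule`, heavy submodules load only on
# first attribute access; importing the config class does not pull in the
# torch-backed modeling code.
if __name__ == "__main__":
    from transformers.models.mask2former import Mask2FormerConfig

    print(Mask2FormerConfig().model_type)  # "mask2former"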
| 302 |
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
lowerCAmelCase_ = logging.get_logger(__name__)
# General docstring
lowerCAmelCase_ = 'RegNetConfig'
# Base docstring
lowerCAmelCase_ = 'facebook/regnet-y-040'
lowerCAmelCase_ = [1, 1_088, 7, 7]
# Image classification docstring
lowerCAmelCase_ = 'facebook/regnet-y-040'
lowerCAmelCase_ = 'tabby, tabby cat'
lowerCAmelCase_ = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,
            groups=groups,
            bias=False,
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetEmbeddings(nn.Module):
    """
    RegNet embeddings (stem) composed of a single aggressive convolution.
    """

    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class RegNetShortCut(nn.Module):
    """
    RegNet shortcut, used to project the residual features to the correct size. If needed, it is also used to
    downsample the input using `stride=2`.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class RegNetSELayer(nn.Module):
    """
    Squeeze and Excitation layer (SE) proposed in [Squeeze-and-Excitation Networks](https://arxiv.org/abs/1709.01507).
    """

    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        # b c h w -> b c 1 1
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state
class RegNetXLayer(nn.Module):
    """
    RegNet's layer composed by three `3x3` convolutions, same as a ResNet bottleneck layer with reduction = 1.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetYLayer(nn.Module):
    """
    RegNet's Y layer: an X layer with Squeeze and Excitation.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetStage(nn.Module):
    """
    A RegNet stage composed by stacked layers.
    """

    def __init__(
        self,
        config: RegNetConfig,
        in_channels: int,
        out_channels: int,
        stride: int = 2,
        depth: int = 2,
    ):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state
class RegNetEncoder(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)

            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
class RegNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        # the checked module type was lost in extraction; RegNetModel is assumed here
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value
REGNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
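

# Usage sketch (standalone): run the classification head end to end. The checkpoint
# matches the docstring constants above; the random tensor stands in for real
# AutoImageProcessor output.
if __name__ == "__main__":
    from transformers import RegNetForImageClassification as _RegNetForImageClassification

    _model = _RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
    _pixel_values = torch.randn(1, 3, 224, 224)
    with torch.no_grad():
        _logits = _model(_pixel_values).logits  # shape (1, num_labels)
    print(_model.config.id2label[int(_logits.argmax(-1))])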
| 302 | 1 |
from manim import *


class Stage5(Scene):
    # Scene name, step numbering, and manim direction constants below are
    # reconstructed from the layout; the scene follows the style of Accelerate's
    # big-model-inference animation series.
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        meta_mem = Rectangle(height=0.25, width=0.25)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        model_arr = []
        model_cpu_arr = []
        model_meta_arr = []

        for i, rect in enumerate(model_base):
            rect.set_stroke(YELLOW)
            cpu_target = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0], direction=UP, buff=0.0)
            else:
                cpu_target.next_to(model_cpu_arr[i - 1], direction=RIGHT, buff=0.0)
            self.add(cpu_target)
            model_cpu_arr.append(cpu_target)

        self.add(*model_arr, *model_cpu_arr, *model_meta_arr)

        checkpoint_base = [mem.copy() for i in range(6)]
        checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT, buff=0)
        checkpoint_text = Text("Loaded Checkpoint", font_size=24)
        checkpoint = Group(checkpoint_rect, checkpoint_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        checkpoint.move_to([3, 0.5, 0])
        self.add(checkpoint)

        ckpt_arr = []
        ckpt_cpu_arr = []

        for i, rect in enumerate(checkpoint_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            ckpt_arr.append(target)

            cpu_target = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5])
            ckpt_cpu_arr.append(cpu_target)
        self.add(*ckpt_arr, *ckpt_cpu_arr)

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])

        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        step_1 = MarkupText(
            "Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.",
            font_size=24,
        )
        step_1.move_to([2, 2, 0])

        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text("Disk", font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4.0, -1.25, 0])

        self.play(Write(step_1, run_time=3), Write(disk_text, run_time=1), Create(disk_rects, run_time=1))

        animations = []
        for i, rect in enumerate(ckpt_cpu_arr):
            target = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i]).scale(0.5)
            animations.append(MoveToTarget(target, run_time=1.5))
        self.play(*animations)

        self.play(FadeOut(step_1))

        step_2 = MarkupText("Then, the checkpoint is removed from memory\nthrough garbage collection.", font_size=24)
        step_2.move_to([2, 2, 0])

        self.play(Write(step_2, run_time=3))

        self.play(
            FadeOut(checkpoint_rect, checkpoint_text, *ckpt_arr, *ckpt_cpu_arr),
        )

        self.wait()
| 151 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
    SEWConfig,
    SEWForCTC,
    SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.upsample.0": "encoder.upsample.projection",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "layer_norm",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def convert_config(model, is_finetuned):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
    config.activation_dropout = fs_config.activation_dropout
    config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
    config.attention_dropout = fs_config.attention_dropout
    config.feat_proj_dropout = fs_config.dropout_input
    config.hidden_dropout = fs_config.dropout
    config.mask_feature_length = fs_config.mask_channel_length
    config.mask_feature_prob = fs_config.mask_channel_prob
    config.mask_time_length = fs_config.mask_length
    config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config
@torch.no_grad()
def convert_sew_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak the model's weights to the transformers design.
    """
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=return_attention_mask,
    )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
lowercase__ = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
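

# Usage sketch: the converter can also be driven from Python. The paths below are
# placeholders and require a real fairseq SEW checkpoint on disk.
def _example_conversion():
    convert_sew_checkpoint(
        checkpoint_path="sew_tiny_100k.pt",  # placeholder path
        pytorch_dump_folder_path="./sew-tiny-100k",
        config_path=None,
        dict_path=None,
        is_finetuned=False,
    )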
| 151 | 1 |
__version__ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
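

# Usage sketch for one utility exported above: `find_executable_batch_size`
# retries the wrapped function with a halved batch size whenever it raises an
# out-of-memory error. The training body here is a placeholder.
from accelerate.utils import find_executable_batch_size


@find_executable_batch_size(starting_batch_size=128)
def _example_training_loop(batch_size):
    print(f"Trying batch_size={batch_size}")
    # ... build dataloaders and run the training step with `batch_size` ...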
| 354 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
__UpperCAmelCase = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        # the black target version's digits were lost in extraction; PY37 is assumed
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
| 145 | 0 |
"""simple docstring"""
import math
def _lowerCAmelCase ( ):
UpperCAmelCase = input('Enter message: ' )
UpperCAmelCase = int(input(F"""Enter key [2-{len(lowercase_ ) - 1}]: """ ) )
UpperCAmelCase = input('Encryption/Decryption [e/d]: ' )
if mode.lower().startswith('e' ):
UpperCAmelCase = encrypt_message(lowercase_ , lowercase_ )
elif mode.lower().startswith('d' ):
UpperCAmelCase = decrypt_message(lowercase_ , lowercase_ )
# Append pipe symbol (vertical bar) to identify spaces at the end.
print(F"""Output:\n{text + '|'}""" )
def _lowerCAmelCase ( lowercase_ , lowercase_ ):
UpperCAmelCase = [''] * key
for col in range(lowercase_ ):
UpperCAmelCase = col
while pointer < len(lowercase_ ):
cipher_text[col] += message[pointer]
pointer += key
return "".join(lowercase_ )
def _lowerCAmelCase ( lowercase_ , lowercase_ ):
UpperCAmelCase = math.ceil(len(lowercase_ ) / key )
UpperCAmelCase = key
UpperCAmelCase = (num_cols * num_rows) - len(lowercase_ )
UpperCAmelCase = [''] * num_cols
UpperCAmelCase = 0
UpperCAmelCase = 0
for symbol in message:
plain_text[col] += symbol
col += 1
if (
(col == num_cols)
or (col == num_cols - 1)
and (row >= num_rows - num_shaded_boxes)
):
UpperCAmelCase = 0
row += 1
return "".join(lowercase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
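

# Round-trip sketch: columnar transposition with a fixed key is invertible, so
# decrypting an encrypted message recovers the original (test string arbitrary).
assert decrypt_message(6, encrypt_message(6, "Harshil Darji")) == "Harshil Darji"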
| 78 |
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union

import flax
import jax.numpy as jnp

from ..utils import BaseOutput


SCHEDULER_CONFIG_NAME = "scheduler_config.json"


class FlaxKarrasDiffusionSchedulers(Enum):
    # member names reconstructed from diffusers' Flax scheduler enum
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5
@dataclass
class FlaxSchedulerOutput(BaseOutput):
    """
    Base class for the output of a scheduler's step function.
    """

    prev_sample: jnp.ndarray


class FlaxSchedulerMixin:
    """
    Mixin containing common functions for the schedulers.
    """

    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Dict[str, Any] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)


def betas_for_alpha_bar(num_diffusion_timesteps: int, max_beta=0.999, dtype=jnp.float32) -> jnp.ndarray:
    """
    Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
    (1-beta) over time from t = [0, 1].
    """

    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)
@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"
            )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas,
            betas=betas,
            alphas_cumprod=alphas_cumprod,
        )
def get_sqrt_alpha_prod(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
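

# Usage sketch: `add_noise_common` implements the forward-diffusion identity
#   x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps
# broadcast over per-sample timesteps. The schedule values below are illustrative.
def _example_add_noise():
    betas = jnp.linspace(1e-4, 2e-2, 1000)
    alphas = 1.0 - betas
    state = CommonSchedulerState(alphas=alphas, betas=betas, alphas_cumprod=jnp.cumprod(alphas, axis=0))
    x0 = jnp.zeros((2, 3, 8, 8))
    eps = jnp.ones((2, 3, 8, 8))
    return add_noise_common(state, x0, eps, jnp.array([10, 500])).shape  # (2, 3, 8, 8)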
| 322 | 0 |
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)

    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask


def clamp(n, smallest, largest):
    return max(smallest, min(n, largest))


def clamp_rect(rect: [int], min: [int], max: [int]):
    return (
        clamp(rect[0], min[0], max[0]),
        clamp(rect[1], min[1], max[1]),
        clamp(rect[2], min[0], max[0]),
        clamp(rect[3], min[1], max[1]),
    )


def add_overlap_rect(rect: [int], overlap: int, image_size: [int]):
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect


def squeeze_tile(tile, original_image, original_slice, slice_x):
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result


def unsqueeze_tile(tile, original_image_slice):
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile


def next_divisible(n, d):
    divisor = n % d
    return n - divisor
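

# Quick check of the edge-feathering mask produced above: a 64x64 tile with an
# 8-pixel linear-ramp border, keeping the left edge hard (sizes chosen for illustration).
def _example_mask():
    mask = make_transparency_mask((64, 64), 8, remove_borders=["l"])
    return mask.shape, int(mask.min()), int(mask.max())  # -> (64, 64), 0, 255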
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        low_res_scheduler: DDPMScheduler,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        max_noise_level: int = 350,
    ):
        super().__init__(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            max_noise_level=max_noise_level,
        )

    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        torch.manual_seed(0)
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders
            ),
            mode="L",
        )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask
        )

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[PIL.Image.Image, List[PIL.Image.Image]],
        num_inference_steps: int = 75,
        guidance_scale: float = 9.0,
        noise_level: int = 50,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        tile_size: int = 128,
        tile_border: int = 32,
        original_image_slice: int = 32,
    ):
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice,
                    x,
                    y,
                    tile_size,
                    tile_border,
                    image,
                    final_image,
                    prompt=prompt,
                    num_inference_steps=num_inference_steps,
                    guidance_scale=guidance_scale,
                    noise_level=noise_level,
                    negative_prompt=negative_prompt,
                    num_images_per_prompt=num_images_per_prompt,
                    eta=eta,
                    generator=generator,
                    latents=latents,
                )
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image


def main():
    # Run a demo
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        print(f"progress: {obj['progress']:.4f}")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
    final_image.save("diffusers_library.jpg")
if __name__ == "__main__":
main()
| 351 |
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
    [0, 1, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0, 0],
    [1, 1, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or not alive
                and neighbour_count == 3
            ):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)

        next_generation.append(next_generation_row)
    return next_generation
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : list[list[int]] , __magic_name__ : int ) -> list[Image.Image]:
"""simple docstring"""
UpperCamelCase :int = []
for _ in range(__magic_name__ ):
# Create output image
UpperCamelCase :int = Image.new("""RGB""" , (len(cells[0] ), len(__magic_name__ )) )
UpperCamelCase :Tuple = img.load()
# Save cells to image
for x in range(len(__magic_name__ ) ):
for y in range(len(cells[0] ) ):
UpperCamelCase :Union[str, Any] = 255 - cells[y][x] * 255
UpperCamelCase :int = (colour, colour, colour)
# Save image
images.append(__magic_name__ )
UpperCamelCase :Any = new_generation(__magic_name__ )
return images
if __name__ == "__main__":
UpperCAmelCase_ : Any = generate_images(GLIDER, 16)
images[0].save('''out.gif''', save_all=True, append_images=images[1:])
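# Quick illustrative check (added; not part of the original script): a single
# generation flips the vertical blinker into a horizontal one.
#
#     >>> new_generation(BLINKER)
#     [[0, 0, 0], [1, 1, 1], [0, 0, 0]]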
| 62 | 0 |
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
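# Illustrative sketch of the pattern showcased below (added for clarity; not
# part of the original script). `find_executable_batch_size` re-runs the
# decorated function with a halved batch size whenever it raises an
# out-of-memory error:
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def train(batch_size):
#         ...  # build dataloaders with `batch_size` and run training
#
#     train()  # called with no arguments; the decorator supplies `batch_size`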
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 73 |
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard


def test_mockfs(mockfs):
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_non_mockfs():
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_extract_path_from_uri():
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False

    dataset_path = "./local/path"
    new_dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path == new_dataset_path


def test_is_remote_filesystem(mockfs):
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True

    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False


@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        reason = f"for '{compression_fs_class.protocol}' compression protocol, "
        if compression_fs_class.protocol == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
    assert isinstance(fs, compression_fs_class)
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex(".")]
    assert fs.glob("*") == [expected_filename]
    with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
        assert f.read() == expected_file.read()


@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
    compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = "dataset.jsonl"
    path = f"{protocol}://{member_file_path}::{compressed_file_path}"
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile("non_existing_" + member_file_path)


@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()


def test_fs_overwrites():
    protocol = "bz2"

    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)

    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
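# Illustrative note (added): the `protocol://member::archive` strings tested in
# `test_fs_isfile` are fsspec URL chaining, e.g. (paths are placeholders):
#
#     fs, *_ = fsspec.get_fs_token_paths("zip://dataset.jsonl::/tmp/archive.zip")
#     fs.isfile("dataset.jsonl")  # True when the archive contains that member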
| 281 | 0 |
"""simple docstring"""
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None):
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)

    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()

            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        prev_timestep: Optional[int] = None,
        generator=None,
        return_dict: bool = True,
    ):
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )

            variance = self._get_variance(
                t,
                predicted_variance=predicted_variance,
                prev_timestep=prev_timestep,
            )

            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )

            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ):
        # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
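# Minimal usage sketch (added; the random tensor stands in for a real denoising
# model, so the output is noise -- this only illustrates the scheduler API):
#
#     scheduler = UnCLIPScheduler()
#     scheduler.set_timesteps(25)
#     sample = torch.randn(1, 3, 64, 64)
#     for t in scheduler.timesteps:
#         model_output = torch.randn_like(sample)  # placeholder prediction
#         sample = scheduler.step(model_output, t, sample).prev_sample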
| 367 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
| 209 | 0 |
def is_automorphic_number(number: int) -> bool:
    # An automorphic number is one whose square ends in the number itself.
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
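# Illustrative values (added): 5**2 = 25 and 76**2 = 5776 both end in the
# original number, while 7**2 = 49 does not.
#
#     >>> is_automorphic_number(5)
#     True
#     >>> is_automorphic_number(76)
#     True
#     >>> is_automorphic_number(7)
#     False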
if __name__ == "__main__":
import doctest
doctest.testmod()
| 82 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
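# Illustrative values (added): the origin never escapes the iteration, while a
# point clearly outside the set escapes on the first step.
#
#     >>> get_distance(0, 0, 50)
#     1.0
#     >>> get_distance(1, 1, 50)
#     0.0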
| 78 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
logger = logging.get_logger(__name__)

IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"openai/imagegpt-small": "",
"openai/imagegpt-medium": "",
"openai/imagegpt-large": "",
}
class ImageGPTConfig(PretrainedConfig):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=512 + 1,  # add one for start of sentence (sos) token
        n_positions=32 * 32,
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        tie_word_embeddings=False,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)


class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
| 355 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    # Doolittle's method: factor a square matrix into a unit-lower-triangular
    # factor and an upper-triangular factor.
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
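# Worked example (added): for the matrix [[2, -2], [-2, 5]], Doolittle's
# method gives L with unit diagonal and U upper-triangular, L @ U == A.
#
#     >>> lower, upper = lower_upper_decomposition(np.array([[2, -2], [-2, 5]]))
#     >>> lower.tolist()
#     [[1.0, 0.0], [-1.0, 1.0]]
#     >>> upper.tolist()
#     [[2.0, -2.0], [0.0, 3.0]]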
| 313 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''',
'''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''',
}
class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
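# Minimal usage sketch (added): instantiate a default config and inspect the
# dynamic axes the ONNX export would use.
#
#     config = RobertaConfig()
#     onnx_config = RobertaOnnxConfig(config)
#     print(onnx_config.inputs)  # input_ids / attention_mask with batch+sequence axes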
| 188 |
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")


class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}

        EXPECTED_BLIP_MAPPING = {
            "BlipModelTest": "BlipModelTester",
            "BlipTextImageModelTest": "BlipTextImageModelsModelTester",
            "BlipTextModelTest": "BlipTextModelTester",
            "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
            "BlipVQAModelTest": "BlipVQAModelTester",
            "BlipVisionModelTest": "BlipVisionModelTester",
        }

        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTest"],
            "BertForMultipleChoice": ["BertModelTest"],
            "BertForNextSentencePrediction": ["BertModelTest"],
            "BertForPreTraining": ["BertModelTest"],
            "BertForQuestionAnswering": ["BertModelTest"],
            "BertForSequenceClassification": ["BertModelTest"],
            "BertForTokenClassification": ["BertModelTest"],
            "BertLMHeadModel": ["BertModelTest"],
            "BertModel": ["BertModelTest"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelTest"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
            "BlipForQuestionAnswering": ["BlipVQAModelTest"],
            "BlipModel": ["BlipModelTest"],
            "BlipTextModel": ["BlipTextModelTest"],
            "BlipVisionModel": ["BlipVisionModelTest"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTester"],
            "BertForMultipleChoice": ["BertModelTester"],
            "BertForNextSentencePrediction": ["BertModelTester"],
            "BertForPreTraining": ["BertModelTester"],
            "BertForQuestionAnswering": ["BertModelTester"],
            "BertForSequenceClassification": ["BertModelTester"],
            "BertForTokenClassification": ["BertModelTester"],
            "BertLMHeadModel": ["BertModelTester"],
            "BertModel": ["BertModelTester"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
            "BlipForQuestionAnswering": ["BlipVQAModelTester"],
            "BlipModel": ["BlipModelTester"],
            "BlipTextModel": ["BlipTextModelTester"],
            "BlipVisionModel": ["BlipVisionModelTester"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
| 320 | 0 |
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()

        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)

        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 356 |
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def main() -> None:
    print("Making key files...")
    make_key_files("rsa", 1024)
    print("Key files generation successful.")


def generate_key(key_size: int):
    # Returns a ((n, e), (n, d)) public/private key pair.
    print("Generating prime p...")
    p = rabinMiller.generate_large_prime(key_size)
    print("Generating prime q...")
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
    while True:
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print("Calculating d that is mod inverse of e...")
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{public_key[0]},{public_key[1]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{private_key[0]},{private_key[1]}")
if __name__ == "__main__":
main()
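# Illustrative round trip (added; not part of the original script). With any
# key pair from generate_key(), RSA encryption and decryption are just modular
# exponentiation:
#
#     public_key, private_key = generate_key(1024)
#     n, e = public_key
#     _, d = private_key
#     message = 42  # must be smaller than n
#     ciphertext = pow(message, e, n)
#     assert pow(ciphertext, d, n) == message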
| 117 | 0 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
| 250 |
'''simple docstring'''
import os
import pytest
from attr import dataclass
os.environ["AWS_DEFAULT_REGION"] = "us-east-1"  # defaults region


@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}

    @property
    def metric_definitions(self):
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
            ]

    @property
    def base_job_name(self):
        return f"{self.framework}-transfromers-test"

    @property
    def test_path(self):
        return f"./tests/sagemaker/scripts/{self.framework}"

    @property
    def image_uri(self):
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"


@pytest.fixture(scope="class")
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
| 250 | 1 |
"""simple docstring"""
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilenet_v1_config(model_name):
    config = MobileNetV1Config(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilenet_v1_config(model_name)

    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="mobilenet_v1_1.0_224",
type=str,
help="Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.",
)
parser.add_argument(
"--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
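# Example invocation (added; the script filename, checkpoint and output paths
# are placeholders):
#
#     python convert_mobilenet_v1.py --model_name mobilenet_v1_1.0_224 \
#         --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
#         --pytorch_dump_folder_path ./mobilenet_v1_1.0_224_hf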
| 371 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
lowerCamelCase__ = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(_UpperCamelCase )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :int = "rag"
SCREAMING_SNAKE_CASE__ :List[str] = True
    def __init__( self , vocab_size=None , is_encoder_decoder=True , prefix=None , bos_token_id=None , pad_token_id=None , eos_token_id=None , decoder_start_token_id=None , title_sep=" / " , doc_sep=" // " , n_docs=5 , max_combined_length=300 , retrieval_vector_size=768 , retrieval_batch_size=8 , dataset="wiki_dpr" , dataset_split="train" , index_name="compressed" , index_path=None , passages_path=None , use_dummy_dataset=False , reduce_loss=False , label_smoothing=0.0 , do_deduplication=True , exclude_bos_score=False , do_marginalize=False , output_retrieved=False , use_cache=True , forced_eos_token_id=None , **kwargs , ) -> None:
        super().__init__(
            bos_token_id=bos_token_id , pad_token_id=pad_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , is_encoder_decoder=is_encoder_decoder , prefix=prefix , vocab_size=vocab_size , **kwargs , )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder" )
        question_encoder_model_type = question_encoder_config.pop("model_type" )
        decoder_config = kwargs.pop("generator" )
        decoder_model_type = decoder_config.pop("model_type" )
        from ..auto.configuration_auto import AutoConfig
        self.question_encoder = AutoConfig.for_model(question_encoder_model_type , **question_encoder_config )
        self.generator = AutoConfig.for_model(decoder_model_type , **decoder_config )
        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache
        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator , "forced_eos_token_id" , None )
    @classmethod
    def from_question_encoder_generator_configs( cls , question_encoder_config: PretrainedConfig , generator_config: PretrainedConfig , **kwargs ) -> PretrainedConfig:
        return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **kwargs )
    def to_dict( self ) -> dict:
        output = copy.deepcopy(self.__dict__ )  # requires `import copy` at module level
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 310 | 0 |
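# --- usage sketch (editor addition, not part of the original dataset row) ---
# A minimal, hedged example of the composite-config pattern above, assuming
# the class corresponds to transformers' RagConfig; requires `transformers`.
if __name__ == "__main__":
    from transformers import BertConfig, GPT2Config, RagConfig

    rag_config = RagConfig.from_question_encoder_generator_configs(
        BertConfig(), GPT2Config(), n_docs=5
    )
    # Sub-configs round-trip through to_dict() as nested dictionaries.
    assert rag_config.to_dict()["question_encoder"]["model_type"] == "bert"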
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest ( unittest.TestCase ):
'''simple docstring'''
    def setUp(self) -> None:
        '''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file , "w" , encoding="utf-8") as fp:
            json.dump(image_processor_map , fp)
    def get_tokenizer(self , **kwargs) -> BertTokenizer:
        '''simple docstring'''
        return BertTokenizer.from_pretrained(self.tmpdirname , **kwargs)
    def get_image_processor(self , **kwargs) -> ViTImageProcessor:
        '''simple docstring'''
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **kwargs)
    def tearDown(self) -> None:
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self) -> list:
        '''simple docstring'''
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self) -> None:
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast))
        self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor , ViTImageProcessor)
    def test_save_load_pretrained_additional_features(self) -> None:
        '''simple docstring'''
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=False , padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast))
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor , ViTImageProcessor)
    def test_image_processor(self) -> None:
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors="np")
        input_processor = processor(images=image_input , return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2)
    def test_tokenizer(self) -> None:
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor)
        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key])
    def test_processor(self) -> None:
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input)
        self.assertListEqual(list(inputs.keys()) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])
        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()
    def test_tokenizer_decode(self) -> None:
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok , decoded_processor)
    def test_model_input_names(self) -> None:
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input)
        self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
| 10 |
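# --- usage sketch (editor addition, not part of the original dataset row) ---
# Save/load round trip for the processor pattern exercised by the tests above,
# built on a throwaway vocab; assumes transformers' public processor API.
if __name__ == "__main__":
    import os
    import tempfile

    from transformers import BertTokenizer, ViTImageProcessor, VisionTextDualEncoderProcessor
    from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES

    with tempfile.TemporaryDirectory() as tmp:
        vocab_path = os.path.join(tmp, VOCAB_FILES_NAMES["vocab_file"])
        with open(vocab_path, "w", encoding="utf-8") as f:
            f.write("\n".join(["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "low"]) + "\n")
        processor = VisionTextDualEncoderProcessor(
            image_processor=ViTImageProcessor(), tokenizer=BertTokenizer(vocab_path)
        )
        processor.save_pretrained(tmp)
        reloaded = VisionTextDualEncoderProcessor.from_pretrained(tmp)
        assert reloaded.tokenizer.get_vocab() == processor.tokenizer.get_vocab()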
"""simple docstring"""
import numpy
class TwoHiddenLayerNeuralNetwork :
'''simple docstring'''
    def __init__( self , input_array : numpy.ndarray , output_array : numpy.ndarray ):
        self.input_array = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.
        # Random initial weights are assigned.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1] , 4 )
        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(
            4 , 3 )
        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3 , 1 )
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape )
    def feedforward( self ) -> numpy.ndarray:
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
        return self.layer_between_second_hidden_layer_and_output
    def back_propagation( self ) -> None:
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output ) , )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T , numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
    def train( self , output : numpy.ndarray , iterations : int , give_loss : bool ) -> None:
        for iteration in range(1 , iterations + 1 ):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward() ) )
                print(F'''Iteration {iteration} Loss: {loss}''' )
    def predict( self , input_arr : numpy.ndarray ) -> int:
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def sigmoid ( value : numpy.ndarray ) -> numpy.ndarray:
    '''simple docstring'''
    return 1 / (1 + numpy.exp(-value ))
def sigmoid_derivative ( value : numpy.ndarray ) -> numpy.ndarray:
    '''simple docstring'''
    return (value) * (1 - (value))
def example ( ) -> int:
    '''simple docstring'''
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ) , dtype=numpy.float64 , )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.float64 )
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(
        input_array=test_input , output_array=output )
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output , iterations=10 , give_loss=False )
    return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.float64 ) )
if __name__ == "__main__":
example()
| 315 | 0 |
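# --- verification sketch (editor addition, not part of the original row) ----
# Finite-difference check of the identity sigmoid'(x) == s(x) * (1 - s(x)),
# which the back-propagation code above relies on; standalone on purpose.
import numpy as _np

def _check_sigmoid_derivative(x: float, eps: float = 1e-6) -> None:
    s = lambda v: 1 / (1 + _np.exp(-v))
    numeric = (s(x + eps) - s(x - eps)) / (2 * eps)
    analytic = s(x) * (1 - s(x))  # what sigmoid_derivative(sigmoid(x)) computes
    assert abs(numeric - analytic) < 1e-8

for _x in (-2.0, 0.0, 3.5):
    _check_sigmoid_derivative(_x)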
"""simple docstring"""
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()
X = np.array(data['''data'''])
y = np.array(data['''target'''])
classes = data['''target_names''']
X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance ( a , b ):
    return np.linalg.norm(np.array(a ) - np.array(b ) )
def classifier ( train_data , train_target , classes , point , k=5 ):
    data = zip(train_data , train_target )
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0] , point )
        distances.append((distance, data_point[1]) )
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances )[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes ).most_common(1 )[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 253 |
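# --- worked example (editor addition, not part of the original dataset row) -
# The same distance-sort-vote rule as `classifier` above, run on hand-made
# 2-D points so the expected label is obvious.
if __name__ == "__main__":
    from collections import Counter

    import numpy as np

    train = [((1.0, 1.0), 0), ((1.2, 0.8), 0), ((8.0, 8.0), 1), ((7.5, 8.2), 1)]
    point, k = (7.9, 8.1), 3
    dists = sorted((np.linalg.norm(np.array(p) - np.array(point)), label) for p, label in train)
    votes = [label for _, label in dists[:k]]
    print(Counter(votes).most_common(1)[0][0])  # -> 1 (the far cluster)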
"""simple docstring"""
import re
import string
import numpy as np
import datasets
_DESCRIPTION = '''
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
25.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
50.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
75.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results["exact_match"], 1))
100.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]
>>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
33.3
'''
_CITATION = '''
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ExactMatch (datasets.Metric ):
    def _info( self ) -> datasets.MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , reference_urls=[] , )
    def _compute( self , predictions , references , regexes_to_ignore=None , ignore_case=False , ignore_punctuation=False , ignore_numbers=False , ) -> dict:
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s , "" , x ) for x in predictions] )
                references = np.array([re.sub(s , "" , x ) for x in references] )
        else:
            predictions = np.asarray(predictions )
            references = np.asarray(references )
        if ignore_case:
            predictions = np.char.lower(predictions )
            references = np.char.lower(references )
        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("" , "" , string.punctuation )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )
        if ignore_numbers:
            repl_table = string.digits.maketrans("" , "" , string.digits )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )
        score_list = predictions == references
        return {"exact_match": np.mean(score_list ) * 100}
| 253 | 1 |
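# --- worked example (editor addition, not part of the original dataset row) -
# The normalization pipeline from `_compute` above on plain lists, showing how
# ignore_case + ignore_punctuation lift the exact-match rate from 0% to 50%.
if __name__ == "__main__":
    import string

    import numpy as np

    preds, refs = ["Cat!", "dog"], ["cat", "bird"]
    table = str.maketrans("", "", string.punctuation)
    p = np.char.translate(np.char.lower(np.asarray(preds)), table=table)
    r = np.char.translate(np.char.lower(np.asarray(refs)), table=table)
    print(np.mean(p == r) * 100)  # 50.0: "cat" matches, "dog" vs "bird" does not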
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor ( ProcessorMixin ):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'BridgeTowerImageProcessor'
    tokenizer_class = ('RobertaTokenizer', 'RobertaTokenizerFast')
    def __init__( self, image_processor, tokenizer ):
        """simple docstring"""
        super().__init__(image_processor, tokenizer )
    def __call__( self, images, text = None, add_special_tokens = True, padding = False, truncation = None, max_length = None, stride = 0, pad_to_multiple_of = None, return_token_type_ids = None, return_attention_mask = None, return_overflowing_tokens = False, return_special_tokens_mask = False, return_offsets_mapping = False, return_length = False, verbose = True, return_tensors = None, **kwargs, ) -> BatchEncoding:
        """simple docstring"""
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs )
        encoding.update(encoding_image_processor )
        return encoding
    def batch_decode( self, *args, **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args, **kwargs )
    def decode( self, *args, **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args, **kwargs )
    @property
    def model_input_names( self ):
        """simple docstring"""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
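# --- usage sketch (editor addition, not part of the original file) ----------
# How the combined processor above is typically driven, assuming it resolves
# to transformers' BridgeTowerProcessor; the checkpoint name is illustrative
# and the call downloads weights, so the demo is left commented out.
# from PIL import Image
# from transformers import BridgeTowerProcessor
# processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
# batch = processor(images=Image.new("RGB", (224, 224)), text="a photo", return_tensors="pt")
# # -> input_ids / attention_mask from the tokenizer, pixel values from the image processor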
| 75 |
"""simple docstring"""
import argparse
import json
import subprocess
def get_runner_status( target_runners , token ) -> None:
    '''simple docstring'''
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        """ https://api.github.com/repos/huggingface/transformers/actions/runners"""
    )
    output = subprocess.run(cmd , shell=True , stdout=subprocess.PIPE )
    o = output.stdout.decode("""utf-8""" )
    status = json.loads(o )
    runners = status["""runners"""]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner )
    # save the result so we can report them on Slack
    with open("""offline_runners.txt""" , """w""" ) as fp:
        fp.write(json.dumps(offline_runners ) )
    if len(offline_runners ) > 0:
        failed = """\n""".join([x["""name"""] for x in offline_runners] )
        raise ValueError(f'The following runners are offline:\n{failed}' )
if __name__ == "__main__":
    def list_str( values ) -> list:
        '''simple docstring'''
        return values.split(""",""" )
__lowerCAmelCase : str =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--target_runners""",
default=None,
type=list_str,
required=True,
help="""Comma-separated list of runners to check status.""",
)
parser.add_argument(
"""--token""", default=None, type=str, required=True, help="""A token that has actions:read permission."""
)
__lowerCAmelCase : str =parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 197 | 0 |
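# --- worked example (editor addition, not part of the original dataset row) -
# The core filtering logic of `get_runner_status` above, exercised against a
# canned API payload instead of a live `curl` call.
if __name__ == "__main__":
    status = {
        "runners": [
            {"name": "gpu-1", "status": "online"},
            {"name": "gpu-2", "status": "offline"},
        ]
    }
    target = ["gpu-1", "gpu-2"]
    offline = [r for r in status["runners"] if r["name"] in target and r["status"] == "offline"]
    print([r["name"] for r in offline])  # ['gpu-2']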
'''simple docstring'''
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 361 |
'''simple docstring'''
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType ( Protocol ):
    '''simple docstring'''
    def process( self , sample : float ) -> float:
        '''simple docstring'''
        return 0.0
def get_bounds( fft_results : np.ndarray, samplerate : int ) -> tuple[int | float, int | float]:
    """simple docstring"""
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
    return lowest, highest
def show_frequency_response( filter_type : FilterType, samplerate : int ) -> None:
    """simple docstring"""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item ) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs ) )
    fft_db = 20 * np.log10(fft_out )
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1 )
    plt.xlabel("""Frequency (Hz)""" )
    plt.xscale("""log""" )
    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate )
    plt.ylim(max([-80, bounds[0]] ), min([80, bounds[1]] ) )
    plt.ylabel("""Gain (dB)""" )
    plt.plot(fft_db )
    plt.show()
def show_phase_response( filter_type : FilterType, samplerate : int ) -> None:
    """simple docstring"""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item ) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_angle = np.angle(np.fft.fft(outputs ) )
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1 )
    plt.xlabel("""Frequency (Hz)""" )
    plt.xscale("""log""" )
    plt.ylim(-2 * pi, 2 * pi )
    plt.ylabel("""Phase shift (Radians)""" )
    plt.plot(np.unwrap(fft_angle, -2 * pi ) )
    plt.show()
| 136 | 0 |
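# --- usage sketch (editor addition, not part of the original dataset row) ---
# A trivial FilterType implementation (one-pole low-pass) that can be fed to
# show_frequency_response / show_phase_response above; the coefficient and
# sample rate are illustrative, not from the original file.
class OnePoleLowPass:
    def __init__(self, alpha: float = 0.5) -> None:
        self.alpha = alpha
        self.prev = 0.0

    def process(self, sample: float) -> float:
        # y[n] = a * x[n] + (1 - a) * y[n - 1]
        self.prev = self.alpha * sample + (1 - self.alpha) * self.prev
        return self.prev

# show_frequency_response(OnePoleLowPass(), 48000)  # opens a matplotlib window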
'''simple docstring'''
def binary_count_setbits ( a : int ) -> int:
    """simple docstring"""
    if a < 0:
        raise ValueError('Input value must be a positive integer' )
    elif isinstance(a , float ):
        raise TypeError('Input value must be a \'int\' type' )
    return bin(a ).count('1' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 47 |
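# --- verification sketch (editor addition, not part of the original row) ----
# Cross-check of bin(n).count("1") used above against a shift-and-mask loop.
if __name__ == "__main__":
    def popcount(n: int) -> int:
        count = 0
        while n:
            count += n & 1
            n >>= 1
        return count

    for n in (0, 1, 25, 37, 2**20 - 1):
        assert popcount(n) == bin(n).count("1")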
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """simple docstring"""
    parser = ArgumentParser(
        description=(
            'PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'
        ) )
    # Optional arguments for the launch helper
    parser.add_argument('--num_cores' , type=int , default=1 , help='Number of TPU cores to use (1 or 8).' )
    # positional
    parser.add_argument(
        'training_script' , type=str , help=(
            'The full path to the single TPU training '
            'program/script to be launched in parallel, '
            'followed by all the arguments for the '
            'training script'
        ) , )
    # rest from the training program
    parser.add_argument('training_script_args' , nargs=REMAINDER )
    return parser.parse_args()
def main():
    """simple docstring"""
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores )]
    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 47 | 1 |
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'good second issue',
'good difficult issue',
'enhancement',
'new pipeline/model',
'new scheduler',
'wip',
]
def main() -> None:
    g = Github(os.environ['GITHUB_TOKEN'] )
    repo = g.get_repo('huggingface/diffusers' )
    open_issues = repo.get_issues(state='open' )
    for issue in open_issues:
        comments = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='closed' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='open' )
issue.remove_from_labels('stale' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
issue.add_to_labels('stale' )
if __name__ == "__main__":
main()
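# --- worked example (editor addition, not part of the original file) --------
# The closing condition above, isolated as a pure function so it can be tested
# without the GitHub API; field names mirror PyGithub's Issue objects.
from datetime import datetime, timedelta

def should_close(updated_at: datetime, created_at: datetime, labels: list) -> bool:
    now = datetime.utcnow()
    return (
        (now - updated_at).days > 7
        and (now - created_at).days >= 30
        and not any(label.lower() in LABELS_TO_EXEMPT for label in labels)
    )

# should_close(datetime.utcnow() - timedelta(days=10),
#              datetime.utcnow() - timedelta(days=60), [])  # -> True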
| 203 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-ctx_encoder-single-nq-base': 512,
'facebook/dpr-ctx_encoder-multiset-base': 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-question_encoder-single-nq-base': 512,
'facebook/dpr-question_encoder-multiset-base': 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-reader-single-nq-base': 512,
'facebook/dpr-reader-multiset-base': 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class DPRContextEncoderTokenizer ( BertTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class DPRQuestionEncoderTokenizer ( BertTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
DPRReaderOutput = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
CUSTOM_DPR_READER_DOCSTRING = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class CustomDPRReaderTokenizerMixin :
    def __call__( self , questions , titles = None , texts = None , padding = False , truncation = False , max_length = None , return_tensors = None , return_attention_mask = None , **kwargs , ) -> BatchEncoding:
        '''simple docstring'''
        if titles is None and texts is None:
            return super().__call__(
                questions , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions , text_pair , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        titles = titles if not isinstance(titles , str) else [titles]
        texts = texts if not isinstance(texts , str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions , str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f'There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts.')
        encoded_question_and_titles = super().__call__(questions , titles , padding=False , truncation=False)['input_ids']
        encoded_texts = super().__call__(texts , add_special_tokens=False , padding=False , truncation=False)['input_ids']
        encoded_inputs = {
            'input_ids': [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles , encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs['attention_mask'] = attention_mask
        return self.pad(encoded_inputs , padding=padding , max_length=max_length , return_tensors=return_tensors)
    def decode_best_spans( self , reader_input , reader_output , num_spans = 16 , max_answer_length = 64 , num_spans_per_passage = 4 , ) -> List[DPRSpanPrediction]:
        '''simple docstring'''
        input_ids = reader_input['input_ids']
        start_logits , end_logits , relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages) , reverse=True , key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id , 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=max_answer_length , top_spans=num_spans_per_passage , )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=doc_id , start_index=start_index , end_index=end_index , text=self.decode(sequence_ids[start_index : end_index + 1]) , ))
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans( self , start_logits , end_logits , max_answer_length , top_spans , ) -> List[DPRSpanPrediction]:
        '''simple docstring'''
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores , key=lambda x: x[1] , reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f'Wrong span indices: [{start_index}:{end_index}]')
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f'Span is too long: {length} > {max_answer_length}')
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class DPRReaderTokenizer ( CustomDPRReaderTokenizerMixin , BertTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
| 203 | 1 |
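# --- usage sketch (editor addition, not part of the original file) ----------
# Shape of the custom reader call implemented above, assuming the classes
# resolve to transformers' DPRReaderTokenizer; downloading a checkpoint is
# required, so the demo is left commented out.
# from transformers import DPRReaderTokenizer
# tok = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
# enc = tok(questions="what is rba?",
#           titles=["Reserve Bank of Australia"],
#           texts=["The Reserve Bank of Australia is Australia's central bank."],
#           return_tensors="pt")
# # enc["input_ids"].shape == (n_passages, sequence_length)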
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UnCLIPImageVariationPipelineFastTests ( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = UnCLIPImageVariationPipeline
    params = IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'}
    batch_params = IMAGE_VARIATION_BATCH_PARAMS
    required_optional_params = [
        'generator',
        'return_dict',
        'decoder_num_inference_steps',
        'super_res_num_inference_steps',
    ]
    test_xformers_attention = False
@property
def __lowercase ( self : Optional[Any] ):
return 32
@property
def __lowercase ( self : Optional[Any] ):
return 32
@property
def __lowercase ( self : Any ):
return self.time_input_dim
@property
def __lowercase ( self : Dict ):
return self.time_input_dim * 4
@property
def __lowercase ( self : Optional[Any] ):
return 100
@property
def __lowercase ( self : Dict ):
lowerCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def __lowercase ( self : Optional[Any] ):
torch.manual_seed(0 )
lowerCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(SCREAMING_SNAKE_CASE_ )
@property
def __lowercase ( self : int ):
torch.manual_seed(0 )
lowerCAmelCase = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(SCREAMING_SNAKE_CASE_ )
@property
def __lowercase ( self : Any ):
torch.manual_seed(0 )
lowerCAmelCase = {
"""clip_embeddings_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""cross_attention_dim""": self.cross_attention_dim,
}
lowerCAmelCase = UnCLIPTextProjModel(**SCREAMING_SNAKE_CASE_ )
return model
@property
def __lowercase ( self : List[Any] ):
torch.manual_seed(0 )
lowerCAmelCase = {
"""sample_size""": 32,
# RGB in channels
"""in_channels""": 3,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 6,
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": """identity""",
}
lowerCAmelCase = UNetaDConditionModel(**SCREAMING_SNAKE_CASE_ )
return model
@property
def __lowercase ( self : Tuple ):
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def __lowercase ( self : Union[str, Any] ):
torch.manual_seed(0 )
lowerCAmelCase = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def __lowercase ( self : Dict ):
# seeded differently to get different unet than `self.dummy_super_res_first`
torch.manual_seed(1 )
lowerCAmelCase = UNetaDModel(**self.dummy_super_res_kwargs )
return model
def __lowercase ( self : int ):
lowerCAmelCase = self.dummy_decoder
lowerCAmelCase = self.dummy_text_proj
lowerCAmelCase = self.dummy_text_encoder
lowerCAmelCase = self.dummy_tokenizer
lowerCAmelCase = self.dummy_super_res_first
lowerCAmelCase = self.dummy_super_res_last
lowerCAmelCase = UnCLIPScheduler(
variance_type="""learned_range""" , prediction_type="""epsilon""" , num_train_timesteps=1000 , )
lowerCAmelCase = UnCLIPScheduler(
variance_type="""fixed_small_log""" , prediction_type="""epsilon""" , num_train_timesteps=1000 , )
lowerCAmelCase = CLIPImageProcessor(crop_size=32 , size=32 )
lowerCAmelCase = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def __lowercase ( self : int , lowerCAmelCase : str , lowerCAmelCase : Optional[Any]=0 , lowerCAmelCase : Any=True ):
lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(SCREAMING_SNAKE_CASE_ ) ).to(SCREAMING_SNAKE_CASE_ )
if str(SCREAMING_SNAKE_CASE_ ).startswith("""mps""" ):
lowerCAmelCase = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
else:
lowerCAmelCase = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
if pil_image:
lowerCAmelCase = input_image * 0.5 + 0.5
lowerCAmelCase = input_image.clamp(0 , 1 )
lowerCAmelCase = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
lowerCAmelCase = DiffusionPipeline.numpy_to_pil(SCREAMING_SNAKE_CASE_ )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def __lowercase ( self : Optional[int] ):
lowerCAmelCase = """cpu"""
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
lowerCAmelCase = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ , pil_image=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase = pipe(**SCREAMING_SNAKE_CASE_ )
lowerCAmelCase = output.images
lowerCAmelCase = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ , pil_image=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase = pipe(
**SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , )[0]
lowerCAmelCase = image[0, -3:, -3:, -1]
lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase = np.array(
[
0.9997,
0.0002,
0.9997,
0.9997,
0.9969,
0.0023,
0.9997,
0.9969,
0.9970,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __lowercase ( self : str ):
lowerCAmelCase = """cpu"""
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
lowerCAmelCase = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ , pil_image=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase = pipe(**SCREAMING_SNAKE_CASE_ )
lowerCAmelCase = output.images
lowerCAmelCase = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ , pil_image=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase = pipe(
**SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , )[0]
lowerCAmelCase = image[0, -3:, -3:, -1]
lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __lowercase ( self : str ):
lowerCAmelCase = """cpu"""
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
lowerCAmelCase = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ , pil_image=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase = [
pipeline_inputs["""image"""],
pipeline_inputs["""image"""],
]
lowerCAmelCase = pipe(**SCREAMING_SNAKE_CASE_ )
lowerCAmelCase = output.images
lowerCAmelCase = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ , pil_image=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase = [
tuple_pipeline_inputs["""image"""],
tuple_pipeline_inputs["""image"""],
]
lowerCAmelCase = pipe(
**SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , )[0]
lowerCAmelCase = image[0, -3:, -3:, -1]
lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
lowerCAmelCase = np.array(
[
0.9997,
0.9989,
0.0008,
0.0021,
0.9960,
0.0018,
0.0014,
0.0002,
0.9933,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __lowercase ( self : Any ):
lowerCAmelCase = torch.device("""cpu""" )
        class DummyScheduler:
            init_noise_sigma = 1
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
lowerCAmelCase = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 )
lowerCAmelCase = pipe.decoder.dtype
lowerCAmelCase = 1
lowerCAmelCase = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
lowerCAmelCase = pipe.prepare_latents(
SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , latents=SCREAMING_SNAKE_CASE_ , scheduler=DummyScheduler() )
lowerCAmelCase = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
lowerCAmelCase = pipe.prepare_latents(
SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , latents=SCREAMING_SNAKE_CASE_ , scheduler=DummyScheduler() )
lowerCAmelCase = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ , pil_image=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase = pipe(
**SCREAMING_SNAKE_CASE_ , decoder_latents=SCREAMING_SNAKE_CASE_ , super_res_latents=SCREAMING_SNAKE_CASE_ ).images
lowerCAmelCase = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ , pil_image=SCREAMING_SNAKE_CASE_ )
# Don't pass image, instead pass embedding
lowerCAmelCase = pipeline_inputs.pop("""image""" )
lowerCAmelCase = pipe.image_encoder(SCREAMING_SNAKE_CASE_ ).image_embeds
lowerCAmelCase = pipe(
**SCREAMING_SNAKE_CASE_ , decoder_latents=SCREAMING_SNAKE_CASE_ , super_res_latents=SCREAMING_SNAKE_CASE_ , image_embeddings=SCREAMING_SNAKE_CASE_ , ).images
# make sure passing text embeddings manually is identical
assert np.abs(img_out_a - img_out_a ).max() < 1e-4
@skip_mps
def __lowercase ( self : Tuple ):
lowerCAmelCase = torch_device == """cpu"""
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
lowerCAmelCase = 1e-2
self._test_attention_slicing_forward_pass(
test_max_difference=SCREAMING_SNAKE_CASE_ , expected_max_diff=SCREAMING_SNAKE_CASE_ )
@skip_mps
def __lowercase ( self : List[str] ):
lowerCAmelCase = torch_device == """cpu"""
lowerCAmelCase = True
lowerCAmelCase = [
"""decoder_num_inference_steps""",
"""super_res_num_inference_steps""",
]
self._test_inference_batch_single_identical(
test_max_difference=SCREAMING_SNAKE_CASE_ , relax_max_difference=SCREAMING_SNAKE_CASE_ , additional_params_copy_to_batched_inputs=SCREAMING_SNAKE_CASE_ , )
def __lowercase ( self : Optional[int] ):
lowerCAmelCase = [
"""decoder_num_inference_steps""",
"""super_res_num_inference_steps""",
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
lowerCAmelCase = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=SCREAMING_SNAKE_CASE_ , additional_params_copy_to_batched_inputs=SCREAMING_SNAKE_CASE_ , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=SCREAMING_SNAKE_CASE_ )
@skip_mps
def __lowercase ( self : Optional[int] ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def __lowercase ( self : str ):
return super().test_save_load_local()
@skip_mps
def __lowercase ( self : str ):
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class UnCLIPImageVariationPipelineIntegrationTests ( unittest.TestCase ):
def __lowercase ( self : Optional[int] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self : Optional[int] ):
lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png""" )
lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/unclip/karlo_v1_alpha_cat_variation_fp16.npy""" )
lowerCAmelCase = UnCLIPImageVariationPipeline.from_pretrained(
"""kakaobrain/karlo-v1-alpha-image-variations""" , torch_dtype=torch.floataa )
lowerCAmelCase = pipeline.to(SCREAMING_SNAKE_CASE_ )
pipeline.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowerCAmelCase = pipeline(
SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , output_type="""np""" , )
lowerCAmelCase = output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 15 )
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
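
# Standard lazy-import guard: when torch/transformers are missing (or too old), the public
# pipeline names are replaced by dummy objects that raise an informative error at instantiation.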
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_dpt_config(checkpoint_url):
    config = DPTConfig()

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
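
# rename_key translates one original DPT/timm parameter name into the matching HuggingFace
# module path, applying substring substitutions from the most specific pattern to the most
# general one.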
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "patch_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")

    return name
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model to hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
UpperCamelCase__ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you\'d like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you\'re pushing to the hub.""",
)
UpperCamelCase__ : Optional[Any] = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
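
# Example invocation (the script filename and output path below are hypothetical):
#   python convert_dpt_to_pytorch.py \
#       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt-large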
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
BERT_BASE_CASED = "bert-base-cased"
FP16 = "fp16"
BF16 = "bf16"
dtypes = [FP16, BF16]
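
# The plugin tests below never spawn real workers: FSDP options are injected purely through
# environment variables via `mockenv_context`, mirroring how `accelerate launch` configures
# worker processes.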
@require_fsdp
@require_cuda
class FSDPPluginIntegration(AccelerateTestCase):
    def setUp(self):
        super().setUp()

        self.dist_env = dict(
            ACCELERATE_USE_FSDP="true",
            MASTER_ADDR="localhost",
            MASTER_PORT="10999",
            RANK="0",
            LOCAL_RANK="0",
            WORLD_SIZE="1",
        )

    def test_sharding_strategy(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            env = self.dist_env.copy()
            env["FSDP_SHARDING_STRATEGY"] = f"{i + 1}"
            env["FSDP_SHARDING_STRATEGY_NAME"] = strategy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.sharding_strategy, ShardingStrategy(i + 1))

    def test_backward_prefetch(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch

        for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH):
            env = self.dist_env.copy()
            env["FSDP_BACKWARD_PREFETCH"] = prefetch_policy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                if prefetch_policy == "NO_PREFETCH":
                    self.assertIsNone(fsdp_plugin.backward_prefetch)
                else:
                    self.assertEqual(fsdp_plugin.backward_prefetch, BackwardPrefetch(i + 1))

    def test_state_dict_type(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType

        for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):
            env = self.dist_env.copy()
            env["FSDP_STATE_DICT_TYPE"] = state_dict_type
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.state_dict_type, StateDictType(i + 1))
                if state_dict_type == "FULL_STATE_DICT":
                    self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu)
                    self.assertTrue(fsdp_plugin.state_dict_config.rank0_only)

    def test_auto_wrap_policy(self):
        model = AutoModel.from_pretrained(BERT_BASE_CASED)
        for policy in FSDP_AUTO_WRAP_POLICY:
            env = self.dist_env.copy()
            env["FSDP_AUTO_WRAP_POLICY"] = policy
            if policy == "TRANSFORMER_BASED_WRAP":
                env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "BertLayer"
            elif policy == "SIZE_BASED_WRAP":
                env["FSDP_MIN_NUM_PARAMS"] = "2000"
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(model)
                if policy == "NO_WRAP":
                    self.assertIsNone(fsdp_plugin.auto_wrap_policy)
                else:
                    self.assertIsNotNone(fsdp_plugin.auto_wrap_policy)

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "TRANSFORMER_BASED_WRAP"
        env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "T5Layer"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            with self.assertRaises(Exception) as cm:
                fsdp_plugin.set_auto_wrap_policy(model)
            self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception))

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "SIZE_BASED_WRAP"
        env["FSDP_MIN_NUM_PARAMS"] = "0"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(model)
            self.assertIsNone(fsdp_plugin.auto_wrap_policy)

    def test_mixed_precision(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
        from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler

        for mp_dtype in dtypes:
            env = self.dist_env.copy()
            env["ACCELERATE_MIXED_PRECISION"] = mp_dtype
            with mockenv_context(**env):
                accelerator = Accelerator()
                if mp_dtype == "fp16":
                    dtype = torch.float16
                elif mp_dtype == "bf16":
                    dtype = torch.bfloat16
                mp_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype)
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy, mp_policy)
                if mp_dtype == FP16:
                    self.assertTrue(isinstance(accelerator.scaler, ShardedGradScaler))
                elif mp_dtype == BF16:
                    self.assertIsNone(accelerator.scaler)
                AcceleratorState._reset_state(True)

    def test_cpu_offload(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload

        for flag in [True, False]:
            env = self.dist_env.copy()
            env["FSDP_OFFLOAD_PARAMS"] = str(flag).lower()
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.cpu_offload, CPUOffload(offload_params=flag))
@require_fsdp
@require_multi_gpu
@slow
class FSDPIntegrationTest(TempDirTestCase):
    def setUp(self):
        super().setUp()
        self.performance_lower_bound = 0.82
        self.performance_configs = [
            "fsdp_shard_grad_op_transformer_based_wrap",
            "fsdp_full_shard_transformer_based_wrap",
        ]
        self.peak_memory_usage_upper_bound = {
            "multi_gpu_fp16": 3200,
            "fsdp_shard_grad_op_transformer_based_wrap_fp16": 2000,
            "fsdp_full_shard_transformer_based_wrap_fp16": 1900,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500,  # fp16 was leading to indefinite hang
        }
        self.n_train = 160
        self.n_val = 160

        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps"])

    def test_performance(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_performance.py")
        cmd = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"]
        for config in self.performance_configs:
            cmd_config = cmd.copy()
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                if strategy.lower() in config:
                    cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
                    break

            if "fp32" in config:
                cmd_config.append("--mixed_precision=no")
            else:
                cmd_config.append("--mixed_precision=fp16")

            if "cpu_offload" in config:
                cmd_config.append("--fsdp_offload_params=True")

            for policy in FSDP_AUTO_WRAP_POLICY:
                if policy.lower() in config:
                    cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                    break

            if policy == "TRANSFORMER_BASED_WRAP":
                cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
            elif policy == "SIZE_BASED_WRAP":
                cmd_config.append("--fsdp_min_num_params=2000")

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--performance_lower_bound={self.performance_lower_bound}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())

    def test_checkpointing(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_checkpointing.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
            "--use_fsdp",
            "--mixed_precision=fp16",
            "--fsdp_transformer_layer_cls_to_wrap=BertLayer",
        ]

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            cmd_config = cmd.copy()
            cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
            if strategy != "FULL_SHARD":
                continue
            state_dict_config_index = len(cmd_config)
            for state_dict_type in FSDP_STATE_DICT_TYPE:
                cmd_config = cmd_config[:state_dict_config_index]
                cmd_config.append(f"--fsdp_state_dict_type={state_dict_type}")
                cmd_config.extend(
                    [
                        self.test_file_path,
                        f"--output_dir={self.tmpdir}",
                        "--partial_train_epoch=1",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())

                cmd_config = cmd_config[:-1]
                resume_from_checkpoint = os.path.join(self.tmpdir, "epoch_0")
                cmd_config.extend(
                    [
                        f"--resume_from_checkpoint={resume_from_checkpoint}",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())

    def test_peak_memory_usage(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_peak_memory_usage.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
        ]
        for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            cmd_config = cmd.copy()
            if "fp16" in spec:
                cmd_config.extend(["--mixed_precision=fp16"])
            else:
                cmd_config.extend(["--mixed_precision=no"])

            if "multi_gpu" in spec:
                continue
            else:
                cmd_config.extend(["--use_fsdp"])
                for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                    if strategy.lower() in spec:
                        cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
                        break

                if "cpu_offload" in spec:
                    cmd_config.append("--fsdp_offload_params=True")

                for policy in FSDP_AUTO_WRAP_POLICY:
                    if policy.lower() in spec:
                        cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                        break

                if policy == "TRANSFORMER_BASED_WRAP":
                    cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
                elif policy == "SIZE_BASED_WRAP":
                    cmd_config.append("--fsdp_min_num_params=2000")

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--peak_memory_upper_bound={peak_mem_upper_bound}",
                    f"--n_train={self.n_train}",
                    f"--n_val={self.n_val}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
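
# To run just the env-var plugin tests without multiple GPUs (test path is hypothetical):
#   python -m pytest tests/fsdp/test_fsdp.py -k FSDPPluginIntegration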
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)


@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager mode."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_mode` has to be set to `False`."
        },
    )

    @cached_property
    def _setup_tpu(self) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)

            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")

        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self) -> "tf.distribute.Strategy":
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
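
# Minimal usage sketch (argument values are illustrative, not defaults):
#   args = TensorFlowBenchmarkArguments(models=["bert-base-cased"], batch_sizes=[8], sequence_lengths=[128])
#   strategy = args.strategy  # resolves to a TPU, single-GPU, or CPU tf.distribute strategy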
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
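
# A simple binary-tree node: `left`/`right` default to None, so a leaf only needs a value.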
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node | None:
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree
def preorder(root: Node | None) -> list[int]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Node | None]:
    output: list[Any] = []
    if root is None:
        return output

    process_queue = deque([root])

    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)

        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output
def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    if root is None:
        return []

    output: list[Sequence[Node | None]] = []

    flag = 0
    height_tree = height(root)

    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0

    return output
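
# For the tree built by make_tree(), zigzag() alternates direction per level and returns
# [[1], [3, 2], [4, 5]].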
def main() -> None:  # Main function for testing.
    tree = make_tree()
    print(f"In-order Traversal: {inorder(tree)}")
    print(f"Pre-order Traversal: {preorder(tree)}")
    print(f"Post-order Traversal: {postorder(tree)}", "\n")

    print(f"Height of Tree: {height(tree)}", "\n")

    print("Complete Level Order Traversal: ")
    print(level_order(tree), "\n")

    print("Level-wise order Traversal: ")
    for level in range(1, height(tree) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(tree, level=level))

    print("\nZigZag order Traversal: ")
    print(zigzag(tree))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
'return_dict': False,
'output_hidden_states': True,
'output_attentions': True,
'torchscript': True,
'torch_dtype': 'float16',
'use_bfloat16': True,
'tf_legacy_loss': True,
'pruned_heads': {'a': 1},
'tie_word_embeddings': False,
'is_decoder': True,
'cross_attention_hidden_size': 1_28,
'add_cross_attention': True,
'tie_encoder_decoder': True,
'max_length': 50,
'min_length': 3,
'do_sample': True,
'early_stopping': True,
'num_beams': 3,
'num_beam_groups': 3,
'diversity_penalty': 0.5,
'temperature': 2.0,
'top_k': 10,
'top_p': 0.7,
'typical_p': 0.2,
'repetition_penalty': 0.8,
'length_penalty': 0.8,
'no_repeat_ngram_size': 5,
'encoder_no_repeat_ngram_size': 5,
'bad_words_ids': [1, 2, 3],
'num_return_sequences': 3,
'chunk_size_feed_forward': 5,
'output_scores': True,
'return_dict_in_generate': True,
'forced_bos_token_id': 2,
'forced_eos_token_id': 3,
'remove_invalid_values': True,
'architectures': ['BertModel'],
'finetuning_task': 'translation',
'id2label': {0: 'label'},
'label2id': {'label': '0'},
'tokenizer_class': 'BertTokenizerFast',
'prefix': 'prefix',
'bos_token_id': 6,
'pad_token_id': 7,
'eos_token_id': 8,
'sep_token_id': 9,
'decoder_start_token_id': 10,
'exponential_decay_length_penalty': (5, 1.01),
'suppress_tokens': [0, 1],
'begin_suppress_tokens': 2,
'task_specific_params': {'translation': 'some_params'},
'problem_type': 'regression',
}
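
# Every value above is deliberately different from `PretrainedConfig`'s default for that key;
# `test_config_common_kwargs_is_complete` below relies on this to detect stale entries.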
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-config-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-config")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("test-config", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)

        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})

        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)
class ConfigTestUtils(unittest.TestCase):
    def test_config_from_string(self):
        c = GPT2Config()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}"
        )
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")

    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"]
        )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(keys_with_defaults)}."
            )

    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")

        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert")

        self.assertIsNotNone(config)

    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check makes sure we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json"
        )

    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]

        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)

    def test_repo_versioning_before(self):
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = "hf-internal-testing/test-two-configs"

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True
        )
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    UniSpeechConfig,
    UniSpeechForCTC,
    UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
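
# MAPPING rewrites fairseq parameter prefixes into their transformers counterparts; the "*" in a
# mapped key is a placeholder for the encoder layer index and is filled in while loading weights.
# Keys listed in TOP_LEVEL_KEYS live outside the `unispeech.` submodule and are used as-is.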
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43

            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16_000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
"""simple docstring"""
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0))  # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
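
# Minimal shape check for the resnet block (values are illustrative only):
#   block = FlaxResnetBlock2D(in_channels=32, out_channels=64)
#   x, t = jnp.ones((1, 8, 8, 32)), jnp.ones((1, 512))
#   params = block.init(jax.random.PRNGKey(0), x, t)
#   out = block.apply(params, x, t)  # -> shape (1, 8, 8, 64)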
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]


class Graph:
    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        subgraph: Graph = Graph({min(self.vertices)}, {})

        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight

            subgraph.add_edge(min_edge, min_weight)

        return subgraph


def solution(filename: str = "p107_network.txt") -> int:
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    filepath: str = os.path.join(script_dir, filename)

    edges: dict[EdgeT, int] = {}
    data: list[str]
    edge1: int
    edge2: int

    with open(filepath) as f:
        data = f.read().strip().split("\n")

    adjaceny_matrix = [line.split(",") for line in data]

    for edge1 in range(1, len(adjaceny_matrix)):
        for edge2 in range(edge1):
            if adjaceny_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjaceny_matrix[edge1][edge2])

    graph: Graph = Graph(set(range(len(adjaceny_matrix))), edges)

    subgraph: Graph = graph.prims_algorithm()

    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())

    return initial_total - optimal_total


if __name__ == "__main__":
    print(f"{solution() = }")
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_squeezebert": [
"SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SqueezeBertConfig",
"SqueezeBertOnnxConfig",
],
"tokenization_squeezebert": ["SqueezeBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Dict = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
"SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"SqueezeBertForMaskedLM",
"SqueezeBertForMultipleChoice",
"SqueezeBertForQuestionAnswering",
"SqueezeBertForSequenceClassification",
"SqueezeBertForTokenClassification",
"SqueezeBertModel",
"SqueezeBertModule",
"SqueezeBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
raise Exception("requires fairseq >= 1.0.0a")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"
def convert_xlm_roberta_xl_checkpoint_to_pytorch(
    roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
    )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our RoBERTa config:", config)

    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.

    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )

        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer

    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
lowerCamelCase : List[Any] = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
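# Hedged usage sketch for the conversion entry point above. The script file
# name and checkpoint location are assumptions, not taken from this code; the
# flags come from the argparse definitions directly above:
#
#   python convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py \
#       --roberta_checkpoint_path ./xlmr.xl \
#       --pytorch_dump_folder_path ./xlm-roberta-xl \
#       --classification_head   # only if the fairseq checkpoint carries an MNLI head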
| 208 | 1 |
import argparse
import json
from tqdm import tqdm
def UpperCamelCase ( ):
snake_case : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--src_path" , type=__lowerCamelCase , default="biencoder-nq-dev.json" , help="Path to raw DPR training data" , )
parser.add_argument(
"--evaluation_set" , type=__lowerCamelCase , help="where to store parsed evaluation_set file" , )
parser.add_argument(
"--gold_data_path" , type=__lowerCamelCase , help="where to store parsed gold_data_path file" , )
snake_case : Tuple = parser.parse_args()
with open(args.src_path , "r" ) as src_file, open(args.evaluation_set , "w" ) as eval_file, open(
args.gold_data_path , "w" ) as gold_file:
snake_case : Tuple = json.load(__lowerCamelCase )
for dpr_record in tqdm(__lowerCamelCase ):
snake_case : Dict = dpr_record["question"]
snake_case : Dict = [context["title"] for context in dpr_record["positive_ctxs"]]
eval_file.write(question + "\n" )
gold_file.write("\t".join(__lowerCamelCase ) + "\n" )
if __name__ == "__main__":
main()
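# Illustrative only: a minimal biencoder-nq-dev.json record of the shape this
# parser expects, inferred from the fields read above ("question" and the
# "title" of each entry in "positive_ctxs"); all values are placeholders:
#
#   [
#     {
#       "question": "who wrote the declaration of independence",
#       "positive_ctxs": [{"title": "Declaration of Independence", "text": "..."}]
#     }
#   ]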
| 59 |
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
__lowerCamelCase = logging.getLogger(__name__)
__lowerCamelCase = """pytorch_model.bin"""
@dataclasses.dataclass
class UpperCAmelCase :
A__ : str = dataclasses.field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."} )
A__ : Optional[str] = dataclasses.field(
default=A_ ,metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."} ,)
@dataclasses.dataclass
class UpperCAmelCase :
A__ : str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."} )
A__ : str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."} )
A__ : Optional[str] = dataclasses.field(
default=A_ ,metadata={"help": "A csv or a json file containing the validation data."} )
A__ : Optional[str] = dataclasses.field(
default=A_ ,metadata={"help": "The name of the task to train on."} ,)
A__ : Optional[List[str]] = dataclasses.field(
default=A_ ,metadata={"help": "The list of labels for the task."} )
@dataclasses.dataclass
class UpperCAmelCase :
A__ : str = dataclasses.field(
metadata={"help": "The output directory where the model predictions and checkpoints will be written."} )
A__ : Optional[str] = dataclasses.field(
default="accuracy" ,metadata={"help": "The evaluation metric used for the task."} )
A__ : Optional[str] = dataclasses.field(
default="no" ,metadata={
"help": "The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]"
} ,)
A__ : Optional[int] = dataclasses.field(
default=10 ,metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} ,)
A__ : Optional[float] = dataclasses.field(
default=0.0 ,metadata={
"help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
} ,)
A__ : Optional[bool] = dataclasses.field(
default=A_ ,metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."} ,)
A__ : Optional[bool] = dataclasses.field(
default=A_ ,metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."} ,)
A__ : Optional[bool] = dataclasses.field(
default=A_ ,metadata={"help": "Whether to fine-tune on labeled data after pseudo training."} ,)
A__ : Optional[float] = dataclasses.field(
default=0.0 ,metadata={"help": "Confidence threshold for pseudo-labeled data filtering."} ,)
A__ : Optional[int] = dataclasses.field(
        default=1_00 ,metadata={"help": "Number of rounds of self-training to perform."} ,)
A__ : Optional[int] = dataclasses.field(
default=A_ ,metadata={"help": "Random seed for initialization."} ,)
def UpperCamelCase ( __lowerCamelCase : int , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Dict , __lowerCamelCase : Optional[int] ):
snake_case : Tuple = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
snake_case : Optional[int] = dataset.filter(lambda __lowerCamelCase : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
snake_case : int = int(eval_result * len(__lowerCamelCase ) )
print(__lowerCamelCase )
snake_case : List[str] = dataset.sort("probability" , reverse=__lowerCamelCase )
snake_case : Tuple = dataset.select(range(__lowerCamelCase ) )
snake_case : List[Any] = dataset.remove_columns(["label", "probability"] )
snake_case : Any = dataset.rename_column("prediction" , "label" )
snake_case : str = dataset.map(lambda __lowerCamelCase : {"label": idalabel[example["label"]]} )
snake_case : List[str] = dataset.shuffle(seed=args.seed )
snake_case : int = os.path.join(__lowerCamelCase , f"""train_pseudo.{args.data_file_extension}""" )
if args.data_file_extension == "csv":
dataset.to_csv(__lowerCamelCase , index=__lowerCamelCase )
else:
dataset.to_json(__lowerCamelCase )
def UpperCamelCase ( __lowerCamelCase : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple , **__lowerCamelCase : List[Any] ):
snake_case : int = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
snake_case : Dict = STModelArguments(model_name_or_path=__lowerCamelCase )
snake_case : Tuple = STDataArguments(train_file=__lowerCamelCase , infer_file=__lowerCamelCase )
snake_case : str = STTrainingArguments(output_dir=__lowerCamelCase )
snake_case : int = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(__lowerCamelCase ).items():
setattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
for key, value in kwargs.items():
if hasattr(__lowerCamelCase , __lowerCamelCase ):
setattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Sanity checks
snake_case : List[str] = {}
snake_case : Optional[int] = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
snake_case : str = args.train_file
snake_case : Tuple = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
snake_case : Tuple = args.eval_file
for key in data_files:
snake_case : List[Any] = data_files[key].split("." )[-1]
assert extension in ["csv", "json"], f"""`{key}_file` should be a csv or a json file."""
if args.data_file_extension is None:
snake_case : Union[str, Any] = extension
else:
assert extension == args.data_file_extension, f"""`{key}_file` should be a {args.data_file_extension} file`."""
assert (
args.eval_metric in datasets.list_metrics()
), f"""{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."""
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info("Creating the initial data directory for self-training..." )
snake_case : List[Any] = f"""{args.output_dir}/self-train_iter-{{}}""".format
snake_case : Optional[int] = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=__lowerCamelCase )
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
accelerator.wait_for_everyone()
snake_case : Dict = None
snake_case : Union[str, Any] = None
snake_case : Tuple = 0
snake_case : List[Any] = False
# Show the progress bar
snake_case : List[Any] = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
snake_case : str = data_dir_format(__lowerCamelCase )
assert os.path.exists(__lowerCamelCase )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
snake_case : Dict = os.path.join(__lowerCamelCase , "stage-1" )
snake_case : Optional[Any] = {
"accelerator": accelerator,
"model_name_or_path": args.model_name_or_path,
"cache_dir": args.cache_dir,
"do_train": True,
"train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
"do_eval": True if args.eval_file is not None else False,
"eval_file": data_files["eval"],
"do_predict": True,
"infer_file": data_files["infer"],
"task_name": args.task_name,
"label_list": args.label_list,
"output_dir": current_output_dir,
"eval_metric": args.eval_metric,
"evaluation_strategy": args.evaluation_strategy,
"early_stopping_patience": args.early_stopping_patience,
"early_stopping_threshold": args.early_stopping_threshold,
"seed": args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(__lowerCamelCase , __lowerCamelCase ):
arguments_dict.update({key: value} )
snake_case : int = os.path.join(__lowerCamelCase , "best-checkpoint" , __lowerCamelCase )
if os.path.exists(__lowerCamelCase ):
logger.info(
"Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1." , __lowerCamelCase , __lowerCamelCase , )
else:
logger.info("***** Running self-training: iteration: %d, stage: 1 *****" , __lowerCamelCase )
finetune(**__lowerCamelCase )
accelerator.wait_for_everyone()
assert os.path.exists(__lowerCamelCase )
logger.info("Self-training job completed: iteration: %d, stage: 1." , __lowerCamelCase )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
snake_case : str = os.path.join(__lowerCamelCase , "best-checkpoint" )
snake_case : Dict = os.path.join(__lowerCamelCase , "stage-2" )
# Update arguments_dict
snake_case : List[str] = model_path
snake_case : Optional[Any] = data_files["train"]
snake_case : Optional[Any] = current_output_dir
snake_case : Union[str, Any] = os.path.join(__lowerCamelCase , "best-checkpoint" , __lowerCamelCase )
if os.path.exists(__lowerCamelCase ):
logger.info(
"Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2." , __lowerCamelCase , __lowerCamelCase , )
else:
logger.info("***** Running self-training: iteration: %d, stage: 2 *****" , __lowerCamelCase )
finetune(**__lowerCamelCase )
accelerator.wait_for_everyone()
assert os.path.exists(__lowerCamelCase )
logger.info("Self-training job completed: iteration: %d, stage: 2." , __lowerCamelCase )
snake_case : int = iteration
snake_case : Tuple = data_dir_format(iteration + 1 )
snake_case : Tuple = AutoConfig.from_pretrained(os.path.join(__lowerCamelCase , "best-checkpoint" ) )
        snake_case : Optional[int] = config.id2label
snake_case : List[Any] = os.path.join(__lowerCamelCase , "eval_results_best-checkpoint.json" )
snake_case : Union[str, Any] = os.path.join(__lowerCamelCase , "test_results_best-checkpoint.json" )
assert os.path.exists(__lowerCamelCase )
with open(__lowerCamelCase , "r" ) as f:
snake_case : Dict = float(json.load(__lowerCamelCase )[args.eval_metric] )
snake_case : Optional[int] = os.path.join(__lowerCamelCase , "infer_output_best-checkpoint.csv" )
assert os.path.exists(__lowerCamelCase )
# Loading the dataset from local csv or json files.
snake_case : Optional[Any] = load_dataset(args.data_file_extension , data_files={"data": data_files["infer"]} )["data"]
snake_case : Dict = load_dataset("csv" , data_files={"data": infer_output_file} )["data"]
if accelerator.is_main_process:
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
shutil.copy(__lowerCamelCase , os.path.join(__lowerCamelCase , f"""eval_results_iter-{iteration}.json""" ) )
if os.path.exists(__lowerCamelCase ):
shutil.copy(__lowerCamelCase , os.path.join(__lowerCamelCase , f"""test_results_iter-{iteration}.json""" ) )
create_pseudo_labeled_data(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
accelerator.wait_for_everyone()
snake_case : str = os.path.join(__lowerCamelCase , f"""train_pseudo.{args.data_file_extension}""" )
if args.evaluation_strategy != IntervalStrategy.NO.value:
snake_case : List[Any] = eval_result
if best_iteration is None:
snake_case : List[Any] = new_iteration
snake_case : int = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
snake_case : int = new_iteration
snake_case : Union[str, Any] = new_eval_result
snake_case : str = 0
else:
if new_eval_result == best_eval_result:
snake_case : Any = new_iteration
snake_case : Union[str, Any] = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
snake_case : Tuple = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info("Best iteration: %d" , __lowerCamelCase )
logger.info("Best evaluation result: %s = %f" , args.eval_metric , __lowerCamelCase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(__lowerCamelCase , f"""eval_results_iter-{iteration}.json""" ) , os.path.join(__lowerCamelCase , "eval_results_best-iteration.json" ) , )
else:
# Assume that the last iteration is the best
logger.info("Best iteration: %d" , args.max_selftrain_iterations - 1 )
logger.info("Best evaluation result: %s = %f" , args.eval_metric , __lowerCamelCase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(__lowerCamelCase , f"""eval_results_iter-{args.max_selftrain_iterations - 1}.json""" ) , os.path.join(__lowerCamelCase , "eval_results_best-iteration.json" ) , )
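# Hedged usage sketch: the driver above (exported as `selftrain` in the
# original research example; the name is assumed here) takes the model id,
# the labeled train file, the unlabeled infer file and the output dir
# positionally, plus extra fine-tuning kwargs. File names are placeholders:
#
#   selftrain(
#       "bert-base-uncased", "train.csv", "infer.csv", "output/",
#       eval_file="eval.csv", evaluation_strategy="epoch",
#   )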
| 59 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowercase : List[Any] = {
'configuration_mvp': ['MVP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MvpConfig', 'MvpOnnxConfig'],
'tokenization_mvp': ['MvpTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : int = ['MvpTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Union[str, Any] = [
'MVP_PRETRAINED_MODEL_ARCHIVE_LIST',
'MvpForCausalLM',
'MvpForConditionalGeneration',
'MvpForQuestionAnswering',
'MvpForSequenceClassification',
'MvpModel',
'MvpPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
_lowercase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
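# Usage note: with the _LazyModule indirection above, an attribute access such
# as transformers.models.mvp.MvpModel is resolved only on first use, so merely
# importing the package does not pull in the torch-heavy modeling submodule.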
| 86 |
"""simple docstring"""
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class _UpperCAmelCase ( enum.Enum ):
a__ : str = 0
a__ : List[Any] = 1
a__ : str = 2
@add_end_docstrings(_lowerCAmelCase )
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Dict = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n "
def __init__( self : Optional[Any] , *_lowercase : Any , **_lowercase : Optional[int] ):
super().__init__(*_lowercase , **_lowercase )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
__UpperCAmelCase = None
if self.model.config.prefix is not None:
__UpperCAmelCase = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
__UpperCAmelCase = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = self._sanitize_parameters(prefix=_lowercase , **self._forward_params )
__UpperCAmelCase = {**self._preprocess_params, **preprocess_params}
__UpperCAmelCase = {**self._forward_params, **forward_params}
def a ( self : Any , _lowercase : Optional[Any]=None , _lowercase : List[str]=None , _lowercase : int=None , _lowercase : Union[str, Any]=None , _lowercase : Union[str, Any]=None , _lowercase : Union[str, Any]=None , _lowercase : Union[str, Any]=None , _lowercase : List[Any]=None , **_lowercase : str , ):
__UpperCAmelCase = {}
if prefix is not None:
__UpperCAmelCase = prefix
if prefix:
__UpperCAmelCase = self.tokenizer(
_lowercase , padding=_lowercase , add_special_tokens=_lowercase , return_tensors=self.framework )
__UpperCAmelCase = prefix_inputs['''input_ids'''].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
F'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
''' [None, \'hole\']''' )
__UpperCAmelCase = handle_long_generation
preprocess_params.update(_lowercase )
__UpperCAmelCase = generate_kwargs
__UpperCAmelCase = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' )
if return_tensors is not None:
raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' )
__UpperCAmelCase = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' )
__UpperCAmelCase = ReturnType.TENSORS
if return_type is not None:
__UpperCAmelCase = return_type
if clean_up_tokenization_spaces is not None:
__UpperCAmelCase = clean_up_tokenization_spaces
if stop_sequence is not None:
__UpperCAmelCase = self.tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
if len(_lowercase ) > 1:
warnings.warn(
'''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
''' the stop sequence will be used as the stop sequence string in the interim.''' )
__UpperCAmelCase = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def a ( self : Optional[int] , *_lowercase : Optional[int] , **_lowercase : Any ):
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'''add_space_before_punct_symbol''': True} )
return super()._parse_and_tokenize(*_lowercase , **_lowercase )
def __call__( self : List[str] , _lowercase : str , **_lowercase : Optional[Any] ):
return super().__call__(_lowercase , **_lowercase )
def a ( self : Union[str, Any] , _lowercase : Any , _lowercase : Dict="" , _lowercase : Union[str, Any]=None , **_lowercase : Tuple ):
__UpperCAmelCase = self.tokenizer(
prefix + prompt_text , padding=_lowercase , add_special_tokens=_lowercase , return_tensors=self.framework )
__UpperCAmelCase = prompt_text
if handle_long_generation == "hole":
__UpperCAmelCase = inputs['''input_ids'''].shape[-1]
if "max_new_tokens" in generate_kwargs:
__UpperCAmelCase = generate_kwargs['''max_new_tokens''']
else:
__UpperCAmelCase = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('''We cannot infer how many new tokens are expected''' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
__UpperCAmelCase = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'''We cannot use `hole` to handle this generation the number of desired tokens exceeds the'''
''' models max length''' )
__UpperCAmelCase = inputs['''input_ids'''][:, -keep_length:]
if "attention_mask" in inputs:
__UpperCAmelCase = inputs['''attention_mask'''][:, -keep_length:]
return inputs
def a ( self : Union[str, Any] , _lowercase : List[str] , **_lowercase : Optional[int] ):
__UpperCAmelCase = model_inputs['''input_ids''']
__UpperCAmelCase = model_inputs.get('''attention_mask''' , _lowercase )
# Allow empty prompts
if input_ids.shape[1] == 0:
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = 1
else:
__UpperCAmelCase = input_ids.shape[0]
__UpperCAmelCase = model_inputs.pop('''prompt_text''' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
__UpperCAmelCase = generate_kwargs.pop('''prefix_length''' , 0 )
if prefix_length > 0:
__UpperCAmelCase = '''max_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].max_new_tokens is not None
)
if not has_max_new_tokens:
__UpperCAmelCase = generate_kwargs.get('''max_length''' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
__UpperCAmelCase = '''min_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
__UpperCAmelCase = self.model.generate(input_ids=_lowercase , attention_mask=_lowercase , **_lowercase )
__UpperCAmelCase = generated_sequence.shape[0]
if self.framework == "pt":
__UpperCAmelCase = generated_sequence.reshape(_lowercase , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
__UpperCAmelCase = tf.reshape(_lowercase , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def a ( self : Optional[int] , _lowercase : Union[str, Any] , _lowercase : Optional[int]=ReturnType.FULL_TEXT , _lowercase : List[str]=True ):
__UpperCAmelCase = model_outputs['''generated_sequence'''][0]
__UpperCAmelCase = model_outputs['''input_ids''']
__UpperCAmelCase = model_outputs['''prompt_text''']
__UpperCAmelCase = generated_sequence.numpy().tolist()
__UpperCAmelCase = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
__UpperCAmelCase = {'''generated_token_ids''': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
__UpperCAmelCase = self.tokenizer.decode(
_lowercase , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
__UpperCAmelCase = 0
else:
__UpperCAmelCase = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase , ) )
if return_type == ReturnType.FULL_TEXT:
__UpperCAmelCase = prompt_text + text[prompt_length:]
else:
__UpperCAmelCase = text[prompt_length:]
__UpperCAmelCase = {'''generated_text''': all_text}
records.append(_lowercase )
return records
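# Minimal usage sketch of this pipeline via the standard factory (the model
# id is just an example):
#
#   from transformers import pipeline
#   generator = pipeline("text-generation", model="gpt2")
#   out = generator("Once upon a time", max_new_tokens=20, return_full_text=False)
#   print(out[0]["generated_text"])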
| 86 | 1 |
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def a ( A__ : List[Any] ) -> List[Any]:
"""simple docstring"""
monkeypatch.setattr('datasets.utils.deprecation_utils._emitted_deprecation_warnings' , set() )
@pytest.fixture
def a ( A__ : Dict ) -> List[str]:
"""simple docstring"""
class __lowerCAmelCase :
def __init__( self , lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
_lowercase =metric_id
class __lowerCAmelCase :
_a = [MetricMock(snake_case__ ) for metric_id in ['''accuracy''', '''mse''', '''precision''', '''codeparrot/apps_metric''']]
def A__ ( self ) -> List[str]:
'''simple docstring'''
return self._metrics
monkeypatch.setattr('datasets.inspect.huggingface_hub' , HfhMock() )
@pytest.mark.parametrize(
'func, args' , [(load_metric, ('metrics/mse',)), (list_metrics, ()), (inspect_metric, ('metrics/mse', 'tmp_path'))] )
def a ( A__ : List[Any] , A__ : Optional[Any] , A__ : Union[str, Any] , A__ : Dict , A__ : Any ) -> int:
"""simple docstring"""
if "tmp_path" in args:
_lowercase =tuple(arg if arg != 'tmp_path' else tmp_path for arg in args )
with pytest.warns(A__ , match='https://huggingface.co/docs/evaluate' ):
func(*A__ )
| 205 |
def _SCREAMING_SNAKE_CASE ( a , a = 0 ) -> list:
__A : int = length or len(a )
__A : str = False
for i in range(length - 1 ):
if list_data[i] > list_data[i + 1]:
__A , __A : Optional[int] = list_data[i + 1], list_data[i]
__A : Union[str, Any] = True
return list_data if not swapped else bubble_sort(a , length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
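# Hand-checked trace of the intended recursion: bubble_sort([5, 2, 9, 1])
# bubbles the largest remaining value to the end each pass
# ([2, 5, 1, 9] -> [2, 1, 5, 9] -> [1, 2, 5, 9]) and returns early as soon as
# a full pass makes no swap.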
| 280 | 0 |
"""simple docstring"""
def _lowerCamelCase(__UpperCamelCase ) -> str:
_lowerCAmelCase =int(__UpperCamelCase )
if decimal in (0, 1): # Exit cases for the recursion
return str(__UpperCamelCase )
_lowerCAmelCase , _lowerCAmelCase =divmod(__UpperCamelCase , 2 )
return binary_recursive(__UpperCamelCase ) + str(__UpperCamelCase )
def _lowerCamelCase(__UpperCamelCase ) -> str:
_lowerCAmelCase =str(__UpperCamelCase ).strip()
if not number:
raise ValueError("""No input value was provided""" )
_lowerCAmelCase ="""-""" if number.startswith("""-""" ) else """"""
_lowerCAmelCase =number.lstrip("""-""" )
if not number.isnumeric():
raise ValueError("""Input value is not an integer""" )
return F'''{negative}0b{binary_recursive(int(__UpperCamelCase ) )}'''
if __name__ == "__main__":
from doctest import testmod
testmod()
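# Hand-checked examples of the intended behaviour:
#   binary("-7")  -> "-0b111"   (7 -> divmod 3 r1 -> 1 r1 -> base case "1", i.e. "111")
#   binary("")    -> ValueError("No input value was provided")
#   binary("2.5") -> ValueError("Input value is not an integer")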
| 353 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__A = {
'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ['PerceiverFeatureExtractor']
__A = ['PerceiverImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 341 | 0 |
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
A__ = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
A__ = direct_transformers_import(PATH_TO_TRANSFORMERS)
A__ = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
A__ = re.compile(R"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")
A__ = {
"""DecisionTransformerConfig""",
"""EncoderDecoderConfig""",
"""MusicgenConfig""",
"""RagConfig""",
"""SpeechEncoderDecoderConfig""",
"""TimmBackboneConfig""",
"""VisionEncoderDecoderConfig""",
"""VisionTextDualEncoderConfig""",
"""LlamaConfig""",
}
def _UpperCAmelCase ( snake_case ):
"""simple docstring"""
_lowerCAmelCase = None
# source code of `config_class`
_lowerCAmelCase = inspect.getsource(snake_case )
_lowerCAmelCase = _re_checkpoint.findall(snake_case )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith("""/""" ):
_lowerCAmelCase = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
_lowerCAmelCase = F'https://huggingface.co/{ckpt_name}'
if ckpt_link == ckpt_link_from_name:
_lowerCAmelCase = ckpt_name
break
return checkpoint
def _UpperCAmelCase ( ):
"""simple docstring"""
_lowerCAmelCase = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
_lowerCAmelCase = get_checkpoint_from_config_class(snake_case )
_lowerCAmelCase = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(snake_case )
if len(snake_case ) > 0:
_lowerCAmelCase = """\n""".join(sorted(snake_case ) )
raise ValueError(F'The following configurations don\'t contain any valid checkpoint:\n{message}' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
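# Quick sanity check of the docstring regex defined above:
#   _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
#   -> [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")]
# which is exactly the (name, link) pair the name/link consistency check expects.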
| 82 |
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def a (self : Dict ):
"""simple docstring"""
__snake_case = logging.get_logger()
# the current default level is logging.WARNING
__snake_case = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(a__ )
def a (self : Dict ):
"""simple docstring"""
__snake_case = logging.get_verbosity()
__snake_case = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
__snake_case = '''Testing 1, 2, 3'''
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(a__ ) as cl:
logger.warning(a__ )
self.assertEqual(cl.out , msg + '''\n''' )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(a__ ) as cl:
logger.warning(a__ )
self.assertEqual(cl.out , '''''' )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(a__ ) as cl:
logger.warning(a__ )
self.assertEqual(cl.out , msg + '''\n''' )
# restore to the original level
logging.set_verbosity(a__ )
@mockenv(TRANSFORMERS_VERBOSITY='''error''' )
def a (self : Dict ):
"""simple docstring"""
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
__snake_case = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
__snake_case = os.getenv('''TRANSFORMERS_VERBOSITY''' , a__ )
__snake_case = logging.log_levels[env_level_str]
__snake_case = logging.get_verbosity()
self.assertEqual(
a__ , a__ , f"""TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}""" , )
# restore to the original level
__snake_case = ''''''
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY='''super-error''' )
def a (self : List[Any] ):
"""simple docstring"""
transformers.utils.logging._reset_library_root_logger()
__snake_case = logging.logging.getLogger()
with CaptureLogger(a__ ) as cl:
# this action activates the env var
logging.get_logger('''transformers.models.bart.tokenization_bart''' )
self.assertIn('''Unknown option TRANSFORMERS_VERBOSITY=super-error''' , cl.out )
# no need to restore as nothing was changed
def a (self : Any ):
"""simple docstring"""
transformers.utils.logging._reset_library_root_logger()
__snake_case = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
__snake_case = '''Testing 1, 2, 3'''
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''1''' ):
# nothing should be logged as env var disables this method
with CaptureLogger(a__ ) as cl:
logger.warning_advice(a__ )
self.assertEqual(cl.out , '''''' )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''''' ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(a__ ) as cl:
logger.warning_advice(a__ )
self.assertEqual(cl.out , msg + '''\n''' )
def lowerCamelCase__ ( ) -> str:
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
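# Library-user counterpart of what these tests exercise:
#   from transformers.utils import logging
#   logging.set_verbosity_error()                 # silence warnings globally
#   logger = logging.get_logger("transformers")
# or set it once from the environment, as the mockenv tests do:
#   TRANSFORMERS_VERBOSITY=error python train.py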
| 24 | 0 |
'''simple docstring'''
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
UpperCAmelCase_ = logging.getLogger(__name__)
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict ):
'''simple docstring'''
UpperCAmelCase__ = np.argmax(_lowercase , axis=1 )
return np.sum(outputs == labels )
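# e.g. accuracy(np.array([[0.9, 0.1], [0.2, 0.8]]), np.array([0, 1])) returns 2:
# argmax over axis 1 gives [0, 1], which matches both labels.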
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : List[str] ):
'''simple docstring'''
with open(_lowercase , encoding="""utf_8""" ) as f:
UpperCAmelCase__ = csv.reader(_lowercase )
UpperCAmelCase__ = []
next(_lowercase ) # skip the first line
for line in tqdm(_lowercase ):
output.append((""" """.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
return output
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
UpperCAmelCase__ = []
for dataset in encoded_datasets:
UpperCAmelCase__ = len(_lowercase )
        UpperCAmelCase__ = np.zeros((n_batch, 2, input_len) , dtype=np.int64 )
        UpperCAmelCase__ = np.zeros((n_batch, 2) , dtype=np.int64 )
        UpperCAmelCase__ = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.int64 )
        UpperCAmelCase__ = np.zeros((n_batch,) , dtype=np.int64 )
for (
i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(_lowercase ):
            UpperCAmelCase__ = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            UpperCAmelCase__ = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            UpperCAmelCase__ = with_cont1
            UpperCAmelCase__ = with_cont2
            UpperCAmelCase__ = len(with_cont1 ) - 1
            UpperCAmelCase__ = len(with_cont2 ) - 1
            UpperCAmelCase__ = with_cont1
            UpperCAmelCase__ = with_cont2
UpperCAmelCase__ = mc_label
UpperCAmelCase__ = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(_lowercase ) for t in all_inputs ) )
return tensor_datasets
def _UpperCamelCase ( ):
'''simple docstring'''
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("""--model_name""" , type=_lowercase , default="""openai-gpt""" , help="""pretrained model name""" )
parser.add_argument("""--do_train""" , action="""store_true""" , help="""Whether to run training.""" )
parser.add_argument("""--do_eval""" , action="""store_true""" , help="""Whether to run eval on the dev set.""" )
parser.add_argument(
"""--output_dir""" , default=_lowercase , type=_lowercase , required=_lowercase , help="""The output directory where the model predictions and checkpoints will be written.""" , )
parser.add_argument("""--train_dataset""" , type=_lowercase , default="""""" )
parser.add_argument("""--eval_dataset""" , type=_lowercase , default="""""" )
parser.add_argument("""--seed""" , type=_lowercase , default=42 )
parser.add_argument("""--num_train_epochs""" , type=_lowercase , default=3 )
parser.add_argument("""--train_batch_size""" , type=_lowercase , default=8 )
parser.add_argument("""--eval_batch_size""" , type=_lowercase , default=16 )
parser.add_argument("""--adam_epsilon""" , default=1e-8 , type=_lowercase , help="""Epsilon for Adam optimizer.""" )
parser.add_argument("""--max_grad_norm""" , type=_lowercase , default=1 )
parser.add_argument(
"""--max_steps""" , default=-1 , type=_lowercase , help=(
"""If > 0: set total number of training steps to perform. Override num_train_epochs."""
) , )
parser.add_argument(
"""--gradient_accumulation_steps""" , type=_lowercase , default=1 , help="""Number of updates steps to accumulate before performing a backward/update pass.""" , )
parser.add_argument("""--learning_rate""" , type=_lowercase , default=6.25e-5 )
parser.add_argument("""--warmup_steps""" , default=0 , type=_lowercase , help="""Linear warmup over warmup_steps.""" )
parser.add_argument("""--lr_schedule""" , type=_lowercase , default="""warmup_linear""" )
parser.add_argument("""--weight_decay""" , type=_lowercase , default=0.01 )
parser.add_argument("""--lm_coef""" , type=_lowercase , default=0.9 )
parser.add_argument("""--n_valid""" , type=_lowercase , default=374 )
parser.add_argument("""--server_ip""" , type=_lowercase , default="""""" , help="""Can be used for distant debugging.""" )
parser.add_argument("""--server_port""" , type=_lowercase , default="""""" , help="""Can be used for distant debugging.""" )
UpperCAmelCase__ = parser.parse_args()
print(_lowercase )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("""Waiting for debugger attach""" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_lowercase )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
UpperCAmelCase__ = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
UpperCAmelCase__ = torch.cuda.device_count()
logger.info("""device: {}, n_gpu {}""".format(_lowercase , _lowercase ) )
if not args.do_train and not args.do_eval:
raise ValueError("""At least one of `do_train` or `do_eval` must be True.""" )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
UpperCAmelCase__ = ["""_start_""", """_delimiter_""", """_classify_"""]
UpperCAmelCase__ = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(_lowercase )
UpperCAmelCase__ = tokenizer.convert_tokens_to_ids(_lowercase )
UpperCAmelCase__ = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(_lowercase ) )
model.to(_lowercase )
# Load and encode the datasets
def tokenize_and_encode(SCREAMING_SNAKE_CASE__ : Any ):
if isinstance(_lowercase , _lowercase ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(_lowercase ) )
elif isinstance(_lowercase , _lowercase ):
return obj
return [tokenize_and_encode(_lowercase ) for o in obj]
logger.info("""Encoding dataset...""" )
UpperCAmelCase__ = load_rocstories_dataset(args.train_dataset )
UpperCAmelCase__ = load_rocstories_dataset(args.eval_dataset )
UpperCAmelCase__ = (train_dataset, eval_dataset)
UpperCAmelCase__ = tokenize_and_encode(_lowercase )
# Compute the max input length for the Transformer
UpperCAmelCase__ = model.config.n_positions // 2 - 2
UpperCAmelCase__ = max(
        len(story[:max_length] ) + max(len(cont1[:max_length] ) , len(cont2[:max_length] ) ) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset )
UpperCAmelCase__ = min(_lowercase , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
UpperCAmelCase__ = pre_process_datasets(_lowercase , _lowercase , _lowercase , *_lowercase )
UpperCAmelCase__ , UpperCAmelCase__ = tensor_datasets[0], tensor_datasets[1]
UpperCAmelCase__ = TensorDataset(*_lowercase )
UpperCAmelCase__ = RandomSampler(_lowercase )
UpperCAmelCase__ = DataLoader(_lowercase , sampler=_lowercase , batch_size=args.train_batch_size )
UpperCAmelCase__ = TensorDataset(*_lowercase )
UpperCAmelCase__ = SequentialSampler(_lowercase )
UpperCAmelCase__ = DataLoader(_lowercase , sampler=_lowercase , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
UpperCAmelCase__ = args.max_steps
UpperCAmelCase__ = args.max_steps // (len(_lowercase ) // args.gradient_accumulation_steps) + 1
else:
UpperCAmelCase__ = len(_lowercase ) // args.gradient_accumulation_steps * args.num_train_epochs
UpperCAmelCase__ = list(model.named_parameters() )
UpperCAmelCase__ = ["""bias""", """LayerNorm.bias""", """LayerNorm.weight"""]
UpperCAmelCase__ = [
{
"""params""": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
"""weight_decay""": args.weight_decay,
},
{"""params""": [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], """weight_decay""": 0.0},
]
UpperCAmelCase__ = AdamW(_lowercase , lr=args.learning_rate , eps=args.adam_epsilon )
UpperCAmelCase__ = get_linear_schedule_with_warmup(
_lowercase , num_warmup_steps=args.warmup_steps , num_training_steps=_lowercase )
if args.do_train:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc="""Epoch""" ):
UpperCAmelCase__ = 0
UpperCAmelCase__ = 0
UpperCAmelCase__ = tqdm(_lowercase , desc="""Training""" )
for step, batch in enumerate(_lowercase ):
UpperCAmelCase__ = tuple(t.to(_lowercase ) for t in batch )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = batch
UpperCAmelCase__ = model(_lowercase , mc_token_ids=_lowercase , lm_labels=_lowercase , mc_labels=_lowercase )
UpperCAmelCase__ = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
UpperCAmelCase__ = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
UpperCAmelCase__ = """Training loss: {:.2e} lr: {:.2e}""".format(_lowercase , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
UpperCAmelCase__ = model.module if hasattr(_lowercase , """module""" ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
UpperCAmelCase__ = os.path.join(args.output_dir , _lowercase )
UpperCAmelCase__ = os.path.join(args.output_dir , _lowercase )
torch.save(model_to_save.state_dict() , _lowercase )
model_to_save.config.to_json_file(_lowercase )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
UpperCAmelCase__ = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
UpperCAmelCase__ = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(_lowercase )
if args.do_eval:
model.eval()
UpperCAmelCase__ , UpperCAmelCase__ = 0, 0
UpperCAmelCase__ , UpperCAmelCase__ = 0, 0
for batch in tqdm(_lowercase , desc="""Evaluating""" ):
UpperCAmelCase__ = tuple(t.to(_lowercase ) for t in batch )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = batch
with torch.no_grad():
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = model(
_lowercase , mc_token_ids=_lowercase , lm_labels=_lowercase , mc_labels=_lowercase )
UpperCAmelCase__ = mc_logits.detach().cpu().numpy()
UpperCAmelCase__ = mc_labels.to("""cpu""" ).numpy()
UpperCAmelCase__ = accuracy(_lowercase , _lowercase )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
UpperCAmelCase__ = eval_loss / nb_eval_steps
UpperCAmelCase__ = eval_accuracy / nb_eval_examples
UpperCAmelCase__ = tr_loss / nb_tr_steps if args.do_train else None
UpperCAmelCase__ = {"""eval_loss""": eval_loss, """eval_accuracy""": eval_accuracy, """train_loss""": train_loss}
UpperCAmelCase__ = os.path.join(args.output_dir , """eval_results.txt""" )
with open(_lowercase , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key in sorted(result.keys() ):
logger.info(""" %s = %s""" , _lowercase , str(result[key] ) )
writer.write("""%s = %s\n""" % (key, str(result[key] )) )
if __name__ == "__main__":
main()
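# Hedged CLI sketch: the script name and dataset paths are assumptions; the
# flags come from the argparse definitions above:
#
#   python run_openai_gpt.py --do_train --do_eval \
#       --train_dataset cloze_test_val.csv --eval_dataset cloze_test_test.csv \
#       --output_dir ./rocstories --train_batch_size 16 --num_train_epochs 3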
| 358 |
'''simple docstring'''
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ = s.rsplit(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return new.join(SCREAMING_SNAKE_CASE__ )
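# e.g. rreplace("group_1.res_path.0.w", ".w", ".weight", 1)
#   -> "group_1.res_path.0.weight"
# rsplit with maxsplit=1 only splits at the last occurrence, so any earlier
# matches of the pattern are left untouched.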
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Dict ):
'''simple docstring'''
return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() )
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : List[str] ):
'''simple docstring'''
UpperCAmelCase__ = {}
UpperCAmelCase__ = ["""group_1""", """group_2""", """group_3""", """group_4"""]
for key, value in state_dict.items():
for group_key in group_keys:
if group_key in key:
UpperCAmelCase__ = key.replace(F'''{group_key}.''' , F'''{group_key}.group.''' )
if "res_path" in key:
UpperCAmelCase__ = key.replace("""res_path.""" , """res_path.path.""" )
if key.endswith(""".w""" ):
UpperCAmelCase__ = rreplace(SCREAMING_SNAKE_CASE__ , """.w""" , """.weight""" , 1 )
if key.endswith(""".b""" ):
UpperCAmelCase__ = rreplace(SCREAMING_SNAKE_CASE__ , """.b""" , """.bias""" , 1 )
UpperCAmelCase__ = value.float()
return upgrade
@torch.no_grad()
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : List[Any]=True ):
'''simple docstring'''
from dall_e import Encoder
UpperCAmelCase__ = Encoder()
if os.path.exists(SCREAMING_SNAKE_CASE__ ):
UpperCAmelCase__ = torch.load(SCREAMING_SNAKE_CASE__ )
else:
UpperCAmelCase__ = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE__ )
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
UpperCAmelCase__ = ckpt.state_dict()
encoder.load_state_dict(SCREAMING_SNAKE_CASE__ )
if config_path is not None:
UpperCAmelCase__ = FlavaImageCodebookConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
else:
UpperCAmelCase__ = FlavaImageCodebookConfig()
UpperCAmelCase__ = FlavaImageCodebook(SCREAMING_SNAKE_CASE__ ).eval()
UpperCAmelCase__ = encoder.state_dict()
UpperCAmelCase__ = upgrade_state_dict(SCREAMING_SNAKE_CASE__ )
hf_model.load_state_dict(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = hf_model.state_dict()
UpperCAmelCase__ = count_parameters(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = count_parameters(SCREAMING_SNAKE_CASE__ )
assert torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1e-3 )
if save_checkpoint:
hf_model.save_pretrained(SCREAMING_SNAKE_CASE__ )
else:
return hf_state_dict
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
UpperCAmelCase_ = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 61 | 0 |
def lowerCAmelCase__ ( lowerCamelCase_ : Tuple):
'''simple docstring'''
lowerCAmelCase__ : Tuple = 1
lowerCAmelCase__ : Dict = 2
while i * i <= n:
lowerCAmelCase__ : Tuple = 0
while n % i == 0:
n //= i
multiplicity += 1
n_divisors *= multiplicity + 1
i += 1
if n > 1:
n_divisors *= 2
return n_divisors
def lowerCAmelCase__ ( ):
'''simple docstring'''
lowerCAmelCase__ : str = 1
lowerCAmelCase__ : Any = 1
while True:
i += 1
t_num += i
if count_divisors(SCREAMING_SNAKE_CASE_) > 500:
break
return t_num
if __name__ == "__main__":
print(solution())
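# Hand check on a smaller bound: the first triangular number with more than
# 5 divisors is 28 = 2**2 * 7, whose divisor count is (2 + 1) * (1 + 1) = 6,
# matching the multiplicity product computed by count_divisors.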
| 129 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/config.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/config.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'''
),
'''distilbert-base-uncased-finetuned-sst-2-english''': (
'''https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE__ (__snake_case ):
__lowerCamelCase : Any = """distilbert"""
__lowerCamelCase : List[Any] = {
"""hidden_size""": """dim""",
"""num_attention_heads""": """n_heads""",
"""num_hidden_layers""": """n_layers""",
}
def __init__( self , a=3_0522 , a=512 , a=False , a=6 , a=12 , a=768 , a=4 * 768 , a=0.1 , a=0.1 , a="gelu" , a=0.02 , a=0.1 , a=0.2 , a=0 , **a , ):
lowercase__ : int = vocab_size
lowercase__ : int = max_position_embeddings
lowercase__ : List[str] = sinusoidal_pos_embds
lowercase__ : Tuple = n_layers
lowercase__ : Optional[int] = n_heads
lowercase__ : List[Any] = dim
lowercase__ : Any = hidden_dim
lowercase__ : Union[str, Any] = dropout
lowercase__ : Optional[int] = attention_dropout
lowercase__ : List[Any] = activation
lowercase__ : Union[str, Any] = initializer_range
lowercase__ : Dict = qa_dropout
lowercase__ : Tuple = seq_classif_dropout
super().__init__(**a , pad_token_id=a)
class SCREAMING_SNAKE_CASE__ (__snake_case ):
@property
def snake_case_ ( self):
if self.task == "multiple-choice":
lowercase__ : List[str] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowercase__ : Tuple = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
])
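# Minimal usage sketch (the values shown mirror the defaults above):
#   from transformers import DistilBertConfig, DistilBertModel
#   config = DistilBertConfig(n_layers=6, n_heads=12, dim=768)
#   model = DistilBertModel(config)   # randomly initialised weights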
| 214 | 0 |
"""simple docstring"""
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
'''microsoft/xprophetnet-large-wiki100-cased''': (
'''https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json'''
),
}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase ):
snake_case_ = 'xlm-prophetnet'
snake_case_ = ['past_key_values']
snake_case_ = {
'num_attention_heads': 'num_encoder_attention_heads',
}
def __init__( self : Optional[int] , snake_case : Optional[float] = 0.1 , snake_case : Optional[Union[str, Callable]] = "gelu" , snake_case : Optional[int] = 3_0522 , snake_case : Optional[int] = 1024 , snake_case : Optional[int] = 4096 , snake_case : Optional[int] = 12 , snake_case : Optional[int] = 16 , snake_case : Optional[int] = 4096 , snake_case : Optional[int] = 12 , snake_case : Optional[int] = 16 , snake_case : Optional[float] = 0.1 , snake_case : Optional[float] = 0.1 , snake_case : Optional[int] = 512 , snake_case : Optional[float] = 0.02 , snake_case : Optional[bool] = True , snake_case : Optional[bool] = True , snake_case : Optional[int] = 0 , snake_case : Optional[int] = 2 , snake_case : Optional[int] = 32 , snake_case : Optional[int] = 128 , snake_case : Optional[bool] = False , snake_case : Optional[float] = 0.0 , snake_case : Optional[bool] = True , snake_case : Optional[int] = 0 , snake_case : Optional[int] = 1 , snake_case : Optional[int] = 2 , **snake_case : Optional[Any] , ):
'''simple docstring'''
A__ : Union[str, Any] = vocab_size
A__ : Dict = hidden_size
A__ : List[Any] = encoder_ffn_dim
A__ : int = num_encoder_layers
A__ : str = num_encoder_attention_heads
A__ : Tuple = decoder_ffn_dim
A__ : str = num_decoder_layers
A__ : Union[str, Any] = num_decoder_attention_heads
A__ : int = max_position_embeddings
A__ : str = init_std # Normal(0, this parameter)
A__ : Optional[int] = activation_function
# parameters for xlmprophetnet
A__ : List[str] = ngram
A__ : Tuple = num_buckets
A__ : List[Any] = relative_max_distance
A__ : Dict = disable_ngram_loss
A__ : Any = eps
# 3 Types of Dropout
A__ : str = attention_dropout
A__ : Any = activation_dropout
A__ : str = dropout
A__ : Optional[Any] = use_cache
super().__init__(
pad_token_id=snake_case , bos_token_id=snake_case , eos_token_id=snake_case , is_encoder_decoder=snake_case , add_cross_attention=snake_case , decoder_start_token_id=snake_case , **snake_case , )
@property
    def num_hidden_layers ( self : List[str] ):
'''simple docstring'''
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
    def num_hidden_layers ( self : str , snake_case : Tuple ):
'''simple docstring'''
raise NotImplementedError(
"""This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"""
""" `num_decoder_layers`.""" )
| 296 |
"""simple docstring"""
import os
from distutils.util import strtobool
def _lowerCAmelCase ( UpperCAmelCase__ : List[Any], UpperCAmelCase__ : Optional[Any] ) ->List[str]:
for e in env_keys:
A__ : List[Any] = int(os.environ.get(UpperCAmelCase__, -1 ) )
if val >= 0:
return val
return default
def _lowerCAmelCase ( UpperCAmelCase__ : Tuple, UpperCAmelCase__ : str=False ) ->List[str]:
A__ : List[Any] = os.environ.get(UpperCAmelCase__, str(UpperCAmelCase__ ) )
return strtobool(UpperCAmelCase__ ) == 1 # As its name indicates `strtobool` actually returns an int...
def _lowerCAmelCase ( UpperCAmelCase__ : Tuple, UpperCAmelCase__ : List[Any]="no" ) ->int:
A__ : str = os.environ.get(UpperCAmelCase__, str(UpperCAmelCase__ ) )
return value
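# Hedged restatement of the env-flag pattern used by the helpers above: flags
# arrive as strings and are coerced with `strtobool`, which returns 1/0 rather
# than a bool (hence the `== 1`). `MY_DEBUG_FLAG` is a hypothetical name.
os.environ["MY_DEBUG_FLAG"] = "yes"
_flag = strtobool(os.environ.get("MY_DEBUG_FLAG", "false")) == 1
assert _flag is True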
| 296 | 1 |
'''simple docstring'''
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument(
"""--original_config_file""",
type=str,
required=True,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--image_size""",
default=5_1_2,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(f"""could not parse string as bool {string}""" )
parser.add_argument(
"""--use_linear_projection""", help="""Override for use linear projection""", required=False, type=parse_bool
)
parser.add_argument("""--cross_attention_dim""", help="""Override for cross attention_dim""", required=False, type=int)
__lowerCAmelCase = parser.parse_args()
__lowerCAmelCase = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
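# Hypothetical invocation sketch (file names and paths are placeholders, not
# taken from the original script):
#   python convert_controlnet.py \
#       --checkpoint_path ./control_sd15_canny.pth \
#       --original_config_file ./cldm_v15.yaml \
#       --dump_path ./controlnet-out \
#       --to_safetensors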
| 271 |
'''simple docstring'''
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : Dict ):
'''simple docstring'''
_a : Dict = {}
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
print(self.vertex )
for i in self.vertex:
print(_a ,' -> ' ,' -> '.join([str(_a ) for j in self.vertex[i]] ) )
def __lowercase ( self : Dict ,_a : int ,_a : int ):
'''simple docstring'''
if from_vertex in self.vertex:
self.vertex[from_vertex].append(_a )
else:
# else make a new vertex
_a : int = [to_vertex]
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : Tuple = [False] * len(self.vertex )
# call the recursive helper function
for i in range(len(self.vertex ) ):
if not visited[i]:
self.dfs_recursive(_a ,_a )
def __lowercase ( self : Union[str, Any] ,_a : int ,_a : list ):
'''simple docstring'''
_a : List[Any] = True
print(_a ,end=' ' )
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
self.dfs_recursive(_a ,_a )
if __name__ == "__main__":
__lowerCAmelCase = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("""DFS:""")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
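# A hedged companion sketch: the same traversal with an explicit stack instead
# of recursion, which avoids recursion-depth limits on deep graphs. Names are
# illustrative; visit order can differ from the recursive version depending on
# the order neighbors are pushed.
def dfs_iterative(graph, start):
    visited, stack, order = set(), [start], []
    while stack:
        node = stack.pop()
        if node in visited:
            continue
        visited.add(node)
        order.append(node)
        # push neighbors in reverse so the first-listed neighbor is visited first
        stack.extend(reversed(graph.get(node, [])))
    return order


# dfs_iterative({0: [1, 2], 1: [2], 2: [0, 3], 3: [3]}, 0) -> [0, 1, 2, 3]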
| 271 | 1 |
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 102 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
"""alibaba-damo/mgp-str-base""": """https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json""",
}
class a__ ( snake_case__ ):
_a : Dict = """mgp-str"""
def __init__( self , _A=[3_2, 1_2_8] , _A=4 , _A=3 , _A=2_7 , _A=3_8 , _A=5_0_2_5_7 , _A=3_0_5_2_2 , _A=7_6_8 , _A=1_2 , _A=1_2 , _A=4.0 , _A=True , _A=False , _A=1E-5 , _A=0.0 , _A=0.0 , _A=0.0 , _A=False , _A=0.02 , **_A , ):
"""simple docstring"""
super().__init__(**_A )
__lowerCAmelCase = image_size
__lowerCAmelCase = patch_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = max_token_length
__lowerCAmelCase = num_character_labels
__lowerCAmelCase = num_bpe_labels
__lowerCAmelCase = num_wordpiece_labels
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = mlp_ratio
__lowerCAmelCase = distilled
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = drop_rate
__lowerCAmelCase = qkv_bias
__lowerCAmelCase = attn_drop_rate
__lowerCAmelCase = drop_path_rate
__lowerCAmelCase = output_aa_attentions
__lowerCAmelCase = initializer_range
| 102 | 1 |
"""simple docstring"""
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('0.8.3'):
raise Exception('requires gluonnlp == 0.8.3')
if version.parse(mx.__version__) != version.parse('1.5.0'):
raise Exception('requires mxnet == 1.5.0')
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = 'The Nymphenburg Palace is a beautiful palace in Munich!'
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> List[str]:
lowercase__ : Tuple = {
'''attention_cell''': '''multi_head''',
'''num_layers''': 4,
'''units''': 10_24,
'''hidden_size''': 7_68,
'''max_length''': 5_12,
'''num_heads''': 8,
'''scaled''': True,
'''dropout''': 0.1,
'''use_residual''': True,
'''embed_size''': 10_24,
'''embed_dropout''': 0.1,
'''word_embed''': None,
'''layer_norm_eps''': 1E-5,
'''token_type_vocab_size''': 2,
}
lowercase__ : Optional[Any] = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
lowercase__ : Tuple = BERTEncoder(
attention_cell=predefined_args['''attention_cell'''] , num_layers=predefined_args['''num_layers'''] , units=predefined_args['''units'''] , hidden_size=predefined_args['''hidden_size'''] , max_length=predefined_args['''max_length'''] , num_heads=predefined_args['''num_heads'''] , scaled=predefined_args['''scaled'''] , dropout=predefined_args['''dropout'''] , output_attention=__lowerCamelCase , output_all_encodings=__lowerCamelCase , use_residual=predefined_args['''use_residual'''] , activation=predefined_args.get('''activation''' , '''gelu''' ) , layer_norm_eps=predefined_args.get('''layer_norm_eps''' , __lowerCamelCase ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
lowercase__ : List[str] = '''openwebtext_ccnews_stories_books_cased'''
# Specify download folder to Gluonnlp's vocab
lowercase__ : Union[str, Any] = os.path.join(get_home_dir() , '''models''' )
lowercase__ : List[str] = _load_vocab(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , cls=__lowerCamelCase )
lowercase__ : str = nlp.model.BERTModel(
__lowerCamelCase , len(__lowerCamelCase ) , units=predefined_args['''units'''] , embed_size=predefined_args['''embed_size'''] , embed_dropout=predefined_args['''embed_dropout'''] , word_embed=predefined_args['''word_embed'''] , use_pooler=__lowerCamelCase , use_token_type_embed=__lowerCamelCase , token_type_vocab_size=predefined_args['''token_type_vocab_size'''] , use_classifier=__lowerCamelCase , use_decoder=__lowerCamelCase , )
original_bort.load_parameters(__lowerCamelCase , cast_dtype=__lowerCamelCase , ignore_extra=__lowerCamelCase )
lowercase__ : List[Any] = original_bort._collect_params_with_prefix()
# Build our config 🤗
lowercase__ : Optional[int] = {
'''architectures''': ['''BertForMaskedLM'''],
'''attention_probs_dropout_prob''': predefined_args['''dropout'''],
'''hidden_act''': '''gelu''',
'''hidden_dropout_prob''': predefined_args['''dropout'''],
'''hidden_size''': predefined_args['''embed_size'''],
'''initializer_range''': 0.0_2,
'''intermediate_size''': predefined_args['''hidden_size'''],
'''layer_norm_eps''': predefined_args['''layer_norm_eps'''],
'''max_position_embeddings''': predefined_args['''max_length'''],
'''model_type''': '''bort''',
'''num_attention_heads''': predefined_args['''num_heads'''],
'''num_hidden_layers''': predefined_args['''num_layers'''],
'''pad_token_id''': 1, # 2 = BERT, 1 = RoBERTa
'''type_vocab_size''': 1, # 2 = BERT, 1 = RoBERTa
'''vocab_size''': len(__lowerCamelCase ),
}
lowercase__ : str = BertConfig.from_dict(__lowerCamelCase )
lowercase__ : str = BertForMaskedLM(__lowerCamelCase )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(__lowerCamelCase ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(__lowerCamelCase , __lowerCamelCase ):
lowercase__ : str = hf_param.shape
lowercase__ : int = to_torch(params[gluon_param] )
lowercase__ : Optional[Any] = gluon_param.shape
assert (
shape_hf == shape_gluon
), f"""The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"""
return gluon_param
lowercase__ : str = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , '''word_embed.0.weight''' )
lowercase__ : int = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , '''encoder.position_weight''' )
lowercase__ : int = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , '''encoder.layer_norm.beta''' )
lowercase__ : Optional[int] = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , '''encoder.layer_norm.gamma''' )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
lowercase__ : int = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
lowercase__ : BertLayer = hf_bort_model.bert.encoder.layer[i]
# self attention
lowercase__ : BertSelfAttention = layer.attention.self
lowercase__ : Any = check_and_map_params(
self_attn.key.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_key.bias""" )
lowercase__ : int = check_and_map_params(
self_attn.key.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_key.weight""" )
lowercase__ : Optional[int] = check_and_map_params(
self_attn.query.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_query.bias""" )
lowercase__ : int = check_and_map_params(
self_attn.query.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_query.weight""" )
lowercase__ : Optional[int] = check_and_map_params(
self_attn.value.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_value.bias""" )
lowercase__ : Optional[Any] = check_and_map_params(
self_attn.value.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_value.weight""" )
# self attention output
lowercase__ : BertSelfOutput = layer.attention.output
lowercase__ : Union[str, Any] = check_and_map_params(
self_output.dense.bias , f"""encoder.transformer_cells.{i}.proj.bias""" )
lowercase__ : List[str] = check_and_map_params(
self_output.dense.weight , f"""encoder.transformer_cells.{i}.proj.weight""" )
lowercase__ : List[str] = check_and_map_params(
self_output.LayerNorm.bias , f"""encoder.transformer_cells.{i}.layer_norm.beta""" )
lowercase__ : Any = check_and_map_params(
self_output.LayerNorm.weight , f"""encoder.transformer_cells.{i}.layer_norm.gamma""" )
# intermediate
lowercase__ : BertIntermediate = layer.intermediate
lowercase__ : Dict = check_and_map_params(
intermediate.dense.bias , f"""encoder.transformer_cells.{i}.ffn.ffn_1.bias""" )
lowercase__ : Union[str, Any] = check_and_map_params(
intermediate.dense.weight , f"""encoder.transformer_cells.{i}.ffn.ffn_1.weight""" )
# output
lowercase__ : BertOutput = layer.output
lowercase__ : str = check_and_map_params(
bert_output.dense.bias , f"""encoder.transformer_cells.{i}.ffn.ffn_2.bias""" )
lowercase__ : Any = check_and_map_params(
bert_output.dense.weight , f"""encoder.transformer_cells.{i}.ffn.ffn_2.weight""" )
lowercase__ : int = check_and_map_params(
bert_output.LayerNorm.bias , f"""encoder.transformer_cells.{i}.ffn.layer_norm.beta""" )
lowercase__ : Any = check_and_map_params(
bert_output.LayerNorm.weight , f"""encoder.transformer_cells.{i}.ffn.layer_norm.gamma""" )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
lowercase__ : str = RobertaTokenizer.from_pretrained('''roberta-base''' )
lowercase__ : Tuple = tokenizer.encode_plus(__lowerCamelCase )['''input_ids''']
# Get gluon output
lowercase__ : List[str] = mx.nd.array([input_ids] )
lowercase__ : Union[str, Any] = original_bort(inputs=__lowerCamelCase , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(__lowerCamelCase )
lowercase__ : List[str] = BertModel.from_pretrained(__lowerCamelCase )
hf_bort_model.eval()
lowercase__ : Tuple = tokenizer.encode_plus(__lowerCamelCase , return_tensors='''pt''' )
lowercase__ : Dict = hf_bort_model(**__lowerCamelCase )[0]
lowercase__ : List[str] = output_gluon[0].asnumpy()
lowercase__ : Optional[Any] = output_hf[0].detach().numpy()
lowercase__ : List[str] = np.max(np.abs(hf_layer - gluon_layer ) ).item()
lowercase__ : List[str] = np.allclose(__lowerCamelCase , __lowerCamelCase , atol=1E-3 )
if success:
        print('''✔️ Both models output the same tensors''' )
else:
        print('''❌ Both models do **NOT** output the same tensors''' )
print('''Absolute difference is:''' , __lowerCamelCase )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bort_checkpoint_path', default=None, type=str, required=True, help='Path the official Bort params file.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCAmelCase_ = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 16 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
def UpperCAmelCase__ ( lowerCamelCase ):
lowercase :int = DPTConfig(embedding_type="hybrid" )
if "large" in checkpoint_url:
lowercase :Tuple = 1024
lowercase :Tuple = 4096
lowercase :Union[str, Any] = 24
lowercase :Tuple = 16
lowercase :Optional[Any] = [5, 11, 17, 23]
lowercase :Optional[Any] = [256, 512, 1024, 1024]
lowercase :Tuple = (1, 384, 384)
if "nyu" or "midas" in checkpoint_url:
lowercase :Any = 768
lowercase :Optional[int] = [1, 1, 1, 0.5]
lowercase :int = [256, 512, 768, 768]
lowercase :str = 150
lowercase :Tuple = 16
lowercase :List[str] = (1, 384, 384)
lowercase :Any = False
lowercase :Optional[int] = "project"
if "ade" in checkpoint_url:
lowercase :Dict = True
lowercase :str = 768
lowercase :Tuple = [1, 1, 1, 0.5]
lowercase :Dict = 150
lowercase :Any = 16
lowercase :List[Any] = "huggingface/label-files"
lowercase :Union[str, Any] = "ade20k-id2label.json"
lowercase :List[str] = json.load(open(cached_download(hf_hub_url(lowerCamelCase, lowerCamelCase, repo_type="dataset" ) ), "r" ) )
lowercase :Optional[Any] = {int(lowerCamelCase ): v for k, v in idalabel.items()}
lowercase :Union[str, Any] = idalabel
lowercase :Optional[Any] = {v: k for k, v in idalabel.items()}
lowercase :List[str] = [1, 150, 480, 480]
return config, expected_shape
def UpperCAmelCase__ ( lowerCamelCase ):
lowercase :List[str] = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
for k in ignore_keys:
state_dict.pop(lowerCamelCase, lowerCamelCase )
def UpperCAmelCase__ ( lowerCamelCase ):
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
lowercase :Any = name.replace("pretrained.model", "dpt.encoder" )
if "pretrained.model" in name:
lowercase :int = name.replace("pretrained.model", "dpt.embeddings" )
if "patch_embed" in name:
lowercase :Any = name.replace("patch_embed", "" )
if "pos_embed" in name:
lowercase :Optional[Any] = name.replace("pos_embed", "position_embeddings" )
if "attn.proj" in name:
lowercase :List[str] = name.replace("attn.proj", "attention.output.dense" )
if "proj" in name and "project" not in name:
lowercase :str = name.replace("proj", "projection" )
if "blocks" in name:
lowercase :List[Any] = name.replace("blocks", "layer" )
if "mlp.fc1" in name:
lowercase :str = name.replace("mlp.fc1", "intermediate.dense" )
if "mlp.fc2" in name:
lowercase :Any = name.replace("mlp.fc2", "output.dense" )
if "norm1" in name and "backbone" not in name:
lowercase :Any = name.replace("norm1", "layernorm_before" )
if "norm2" in name and "backbone" not in name:
lowercase :Any = name.replace("norm2", "layernorm_after" )
if "scratch.output_conv" in name:
lowercase :Optional[Any] = name.replace("scratch.output_conv", "head" )
if "scratch" in name:
lowercase :Union[str, Any] = name.replace("scratch", "neck" )
if "layer1_rn" in name:
lowercase :str = name.replace("layer1_rn", "convs.0" )
if "layer2_rn" in name:
lowercase :Dict = name.replace("layer2_rn", "convs.1" )
if "layer3_rn" in name:
lowercase :Tuple = name.replace("layer3_rn", "convs.2" )
if "layer4_rn" in name:
lowercase :Optional[int] = name.replace("layer4_rn", "convs.3" )
if "refinenet" in name:
lowercase :Any = int(name[len("neck.refinenet" ) : len("neck.refinenet" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
lowercase :Any = name.replace(F"refinenet{layer_idx}", F"fusion_stage.layers.{abs(layer_idx-4 )}" )
if "out_conv" in name:
lowercase :str = name.replace("out_conv", "projection" )
if "resConfUnit1" in name:
lowercase :Optional[Any] = name.replace("resConfUnit1", "residual_layer1" )
if "resConfUnit2" in name:
lowercase :int = name.replace("resConfUnit2", "residual_layer2" )
if "conv1" in name:
lowercase :Optional[int] = name.replace("conv1", "convolution1" )
if "conv2" in name:
lowercase :List[str] = name.replace("conv2", "convolution2" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
lowercase :Tuple = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0" )
if "pretrained.act_postprocess2.0.project.0" in name:
lowercase :Dict = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0" )
if "pretrained.act_postprocess3.0.project.0" in name:
lowercase :Any = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0" )
if "pretrained.act_postprocess4.0.project.0" in name:
lowercase :Optional[Any] = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
lowercase :Dict = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection" )
if "pretrained.act_postprocess1.4" in name:
lowercase :Any = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize" )
if "pretrained.act_postprocess2.3" in name:
lowercase :List[Any] = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection" )
if "pretrained.act_postprocess2.4" in name:
lowercase :str = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize" )
if "pretrained.act_postprocess3.3" in name:
lowercase :str = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection" )
if "pretrained.act_postprocess4.3" in name:
lowercase :List[Any] = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection" )
if "pretrained.act_postprocess4.4" in name:
lowercase :Any = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize" )
if "pretrained" in name:
lowercase :Any = name.replace("pretrained", "dpt" )
if "bn" in name:
lowercase :Optional[int] = name.replace("bn", "batch_norm" )
if "head" in name:
lowercase :Union[str, Any] = name.replace("head", "head.head" )
if "encoder.norm" in name:
lowercase :Optional[Any] = name.replace("encoder.norm", "layernorm" )
if "auxlayer" in name:
lowercase :str = name.replace("auxlayer", "auxiliary_head.head" )
if "backbone" in name:
lowercase :List[str] = name.replace("backbone", "backbone.bit.encoder" )
if ".." in name:
lowercase :Optional[int] = name.replace("..", "." )
if "stem.conv" in name:
lowercase :Union[str, Any] = name.replace("stem.conv", "bit.embedder.convolution" )
if "blocks" in name:
lowercase :List[str] = name.replace("blocks", "layers" )
if "convolution" in name and "backbone" in name:
lowercase :int = name.replace("convolution", "conv" )
if "layer" in name and "backbone" in name:
lowercase :List[str] = name.replace("layer", "layers" )
if "backbone.bit.encoder.bit" in name:
lowercase :Dict = name.replace("backbone.bit.encoder.bit", "backbone.bit" )
if "embedder.conv" in name:
lowercase :Dict = name.replace("embedder.conv", "embedder.convolution" )
if "backbone.bit.encoder.stem.norm" in name:
lowercase :str = name.replace("backbone.bit.encoder.stem.norm", "backbone.bit.embedder.norm" )
return name
def UpperCAmelCase__ ( lowerCamelCase, lowerCamelCase ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase :str = state_dict.pop(F"dpt.encoder.layer.{i}.attn.qkv.weight" )
lowercase :str = state_dict.pop(F"dpt.encoder.layer.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
lowercase :List[str] = in_proj_weight[: config.hidden_size, :]
lowercase :int = in_proj_bias[: config.hidden_size]
lowercase :Union[str, Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase :Union[str, Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase :Dict = in_proj_weight[
-config.hidden_size :, :
]
lowercase :List[Any] = in_proj_bias[-config.hidden_size :]
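# Hedged toy check (illustrative, independent of any checkpoint): a fused
# (3*hidden, hidden) projection splits into equal query/key/value blocks,
# which is exactly the slicing performed in the function above.
_toy_hidden = 4
_toy_qkv = torch.arange(3 * _toy_hidden * _toy_hidden, dtype=torch.float32).reshape(3 * _toy_hidden, _toy_hidden)
_toy_q = _toy_qkv[: _toy_hidden]
_toy_k = _toy_qkv[_toy_hidden : 2 * _toy_hidden]
_toy_v = _toy_qkv[-_toy_hidden :]
assert torch.equal(torch.cat([_toy_q, _toy_k, _toy_v]), _toy_qkv)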
def UpperCAmelCase__ ( ):
lowercase :List[str] = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowercase :Any = Image.open(requests.get(lowerCamelCase, stream=lowerCamelCase ).raw )
return im
@torch.no_grad()
def UpperCAmelCase__ ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase ):
lowercase , lowercase :Optional[int] = get_dpt_config(lowerCamelCase )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
lowercase :List[str] = torch.load(lowerCamelCase, map_location="cpu" )
# remove certain keys
remove_ignore_keys_(lowerCamelCase )
# rename keys
for key in state_dict.copy().keys():
lowercase :int = state_dict.pop(lowerCamelCase )
lowercase :Dict = val
# read in qkv matrices
read_in_q_k_v(lowerCamelCase, lowerCamelCase )
# load HuggingFace model
lowercase :int = DPTForSemanticSegmentation(lowerCamelCase ) if "ade" in checkpoint_url else DPTForDepthEstimation(lowerCamelCase )
model.load_state_dict(lowerCamelCase )
model.eval()
# Check outputs on an image
lowercase :Union[str, Any] = 480 if "ade" in checkpoint_url else 384
lowercase :Optional[int] = DPTImageProcessor(size=lowerCamelCase )
lowercase :Dict = prepare_img()
lowercase :Optional[Any] = image_processor(lowerCamelCase, return_tensors="pt" )
# forward pass
lowercase :Any = model(**lowerCamelCase ).logits if "ade" in checkpoint_url else model(**lowerCamelCase ).predicted_depth
if show_prediction:
lowercase :List[Any] = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ), size=(image.size[1], image.size[0]), mode="bicubic", align_corners=lowerCamelCase, )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 255 ).show()
if pytorch_dump_folder_path is not None:
Path(lowerCamelCase ).mkdir(exist_ok=lowerCamelCase )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(lowerCamelCase )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(lowerCamelCase )
if push_to_hub:
model.push_to_hub("ybelkada/dpt-hybrid-midas" )
image_processor.push_to_hub("ybelkada/dpt-hybrid-midas" )
if __name__ == "__main__":
_UpperCAmelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
type=str,
help="URL of the original DPT checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
parser.add_argument(
"--model_name",
default="dpt-large",
type=str,
help="Name of the model, in case you're pushing to the hub.",
)
parser.add_argument(
"--show_prediction",
action="store_true",
)
_UpperCAmelCase : List[Any] = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 236 | 0 |
'''simple docstring'''
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowercase__ : Dict = 16
lowercase__ : int = 32
def _lowerCAmelCase ( __snake_case : Accelerator , __snake_case : int = 16 ) -> Optional[int]:
__A : List[str] = AutoTokenizer.from_pretrained('bert-base-cased' )
__A : List[Any] = load_dataset('glue' , 'mrpc' )
def tokenize_function(__snake_case : Any ):
# max_length=None => use the model max length (it's actually the default)
__A : Dict = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=__snake_case , max_length=__snake_case )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
__A : Dict = datasets.map(
__snake_case , batched=__snake_case , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__A : Tuple = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(__snake_case : Union[str, Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
__A : Any = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
__A : Tuple = 16
elif accelerator.mixed_precision != "no":
__A : int = 8
else:
__A : List[Any] = None
return tokenizer.pad(
__snake_case , padding='longest' , max_length=__snake_case , pad_to_multiple_of=__snake_case , return_tensors='pt' , )
# Instantiate dataloaders.
__A : Any = DataLoader(
tokenized_datasets['train'] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case , drop_last=__snake_case )
__A : Tuple = DataLoader(
tokenized_datasets['validation'] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case , drop_last=(accelerator.mixed_precision == 'fp8') , )
return train_dataloader, eval_dataloader
def _lowerCAmelCase ( __snake_case : str , __snake_case : Any ) -> Union[str, Any]:
# Initialize accelerator
__A : Optional[int] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__A : Union[str, Any] = config['lr']
__A : Union[str, Any] = int(config['num_epochs'] )
__A : List[Any] = int(config['seed'] )
__A : Union[str, Any] = int(config['batch_size'] )
__A : int = evaluate.load('glue' , 'mrpc' )
# If the batch size is too big we use gradient accumulation
__A : Union[str, Any] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
__A : Optional[int] = batch_size // MAX_GPU_BATCH_SIZE
__A : Union[str, Any] = MAX_GPU_BATCH_SIZE
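    # e.g. a requested batch_size of 64 with MAX_GPU_BATCH_SIZE=16 runs micro-batches
    # of 16 and steps the optimizer every 4 batches, keeping the effective batch size at 64.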
set_seed(__snake_case )
__A ,__A : Union[str, Any] = get_dataloaders(__snake_case , __snake_case )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__A : List[str] = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=__snake_case )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__A : Dict = model.to(accelerator.device )
# Instantiate optimizer
__A : Optional[int] = AdamW(params=model.parameters() , lr=__snake_case )
# Instantiate scheduler
__A : Dict = get_linear_schedule_with_warmup(
optimizer=__snake_case , num_warmup_steps=1_00 , num_training_steps=(len(__snake_case ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__A ,__A ,__A ,__A ,__A : Optional[int] = accelerator.prepare(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
# Now we train the model
for epoch in range(__snake_case ):
model.train()
for step, batch in enumerate(__snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
__A : Any = model(**__snake_case )
__A : Optional[Any] = outputs.loss
__A : Any = loss / gradient_accumulation_steps
accelerator.backward(__snake_case )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__A : Dict = model(**__snake_case )
__A : Optional[Any] = outputs.logits.argmax(dim=-1 )
__A ,__A : Any = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=__snake_case , references=__snake_case , )
__A : int = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'epoch {epoch}:' , __snake_case )
def _lowerCAmelCase ( ) -> List[Any]:
__A : Any = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' , type=__snake_case , default=__snake_case , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
__A : List[Any] = parser.parse_args()
__A : Union[str, Any] = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(__snake_case , __snake_case )
if __name__ == "__main__":
main() | 190 |
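# Hypothetical launch sketch (commands are illustrative):
#   python this_script.py --mixed_precision fp16   # single CPU/GPU
#   accelerate launch this_script.py               # multi-GPU/TPU after `accelerate config`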
'''simple docstring'''
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
lowercase__ : List[Any] = '''bert-base-cased'''
lowercase__ : Union[str, Any] = '''google/pegasus-xsum'''
lowercase__ : str = [''' Sam ate lunch today.''', '''Sams lunch ingredients.''']
lowercase__ : Optional[Any] = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee''']
lowercase__ : str = '''patrickvonplaten/t5-tiny-random'''
lowercase__ : List[str] = '''sshleifer/bart-tiny-random'''
lowercase__ : List[str] = '''sshleifer/tiny-mbart'''
lowercase__ : str = '''sshleifer/tiny-marian-en-de'''
def _lowerCAmelCase ( __snake_case : Path , __snake_case : list ) -> str:
__A : Any = '\n'.join(__snake_case )
Path(__snake_case ).open('w' ).writelines(__snake_case )
def _lowerCAmelCase ( __snake_case : Optional[int] ) -> Tuple:
for split in ["train", "val", "test"]:
_dump_articles(os.path.join(__snake_case , f'{split}.source' ) , __snake_case )
_dump_articles(os.path.join(__snake_case , f'{split}.target' ) , __snake_case )
return tmp_dir
class SCREAMING_SNAKE_CASE (a__ ):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A : str = AutoTokenizer.from_pretrained(_UpperCAmelCase)
__A : int = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
__A : int = max(len(tokenizer.encode(_UpperCAmelCase)) for a in ARTICLES)
__A : str = max(len(tokenizer.encode(_UpperCAmelCase)) for a in SUMMARIES)
__A : Dict = 4
__A : Optional[Any] = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
__A ,__A : Any = 'ro_RO', 'de_DE' # ignored for all but mbart, but never causes error.
__A : List[str] = SeqaSeqDataset(
_UpperCAmelCase , data_dir=_UpperCAmelCase , type_path='train' , max_source_length=_UpperCAmelCase , max_target_length=_UpperCAmelCase , src_lang=_UpperCAmelCase , tgt_lang=_UpperCAmelCase , )
__A : Any = DataLoader(_UpperCAmelCase , batch_size=2 , collate_fn=train_dataset.collate_fn)
for batch in dataloader:
assert isinstance(_UpperCAmelCase , _UpperCAmelCase)
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
__A : Optional[Any] = shift_tokens_right(batch['labels'] , tokenizer.pad_token_id)
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED])
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A : str = AutoTokenizer.from_pretrained(_UpperCAmelCase)
__A : Optional[int] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
__A : Tuple = max(len(tokenizer.encode(_UpperCAmelCase)) for a in ARTICLES)
__A : Any = max(len(tokenizer.encode(_UpperCAmelCase)) for a in SUMMARIES)
__A : Optional[int] = 4
__A : Any = LegacySeqaSeqDataset(
_UpperCAmelCase , data_dir=_UpperCAmelCase , type_path='train' , max_source_length=20 , max_target_length=_UpperCAmelCase , )
__A : Union[str, Any] = DataLoader(_UpperCAmelCase , batch_size=2 , collate_fn=train_dataset.collate_fn)
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = AutoTokenizer.from_pretrained('facebook/mbart-large-cc25')
__A : int = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
__A : List[str] = tmp_dir.joinpath('train.source').open().readlines()
__A : Optional[Any] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
pack_data_dir(_UpperCAmelCase , _UpperCAmelCase , 128 , _UpperCAmelCase)
__A : Dict = {x.name for x in tmp_dir.iterdir()}
__A : Dict = {x.name for x in save_dir.iterdir()}
__A : str = save_dir.joinpath('train.source').open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(_UpperCAmelCase) < len(_UpperCAmelCase)
assert len(_UpperCAmelCase) == 1
assert len(packed_examples[0]) == sum(len(_UpperCAmelCase) for x in orig_examples)
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='This test requires fairseq')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
if not FAIRSEQ_AVAILABLE:
return
__A ,__A ,__A : List[Any] = self._get_dataset(max_len=64)
__A : Union[str, Any] = 64
__A : List[Any] = ds.make_dynamic_sampler(_UpperCAmelCase , required_batch_size_multiple=_UpperCAmelCase)
__A : Union[str, Any] = [len(_UpperCAmelCase) for x in batch_sampler]
assert len(set(_UpperCAmelCase)) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(_UpperCAmelCase) == len(_UpperCAmelCase) # no dropped or added examples
__A : List[Any] = DataLoader(_UpperCAmelCase , batch_sampler=_UpperCAmelCase , collate_fn=ds.collate_fn , num_workers=2)
__A : Optional[int] = []
__A : Tuple = []
for batch in data_loader:
__A : Optional[int] = batch['input_ids'].shape
__A : Any = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            __A : Tuple = np.prod(batch['input_ids'].shape)  # np.product is a deprecated alias of np.prod
num_src_per_batch.append(_UpperCAmelCase)
if num_src_tokens > (max_tokens * 1.1):
failures.append(_UpperCAmelCase)
assert num_src_per_batch[0] == max(_UpperCAmelCase)
if failures:
raise AssertionError(F'too many tokens in {len(_UpperCAmelCase)} batches')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A ,__A : Optional[int] = self._get_dataset(max_len=512)
__A : Optional[int] = 2
__A : Dict = ds.make_sortish_sampler(_UpperCAmelCase , shuffle=_UpperCAmelCase)
__A : Tuple = DataLoader(_UpperCAmelCase , batch_size=_UpperCAmelCase , collate_fn=ds.collate_fn , num_workers=2)
__A : Union[str, Any] = DataLoader(_UpperCAmelCase , batch_size=_UpperCAmelCase , collate_fn=ds.collate_fn , num_workers=2 , sampler=_UpperCAmelCase)
__A : str = tokenizer.pad_token_id
def count_pad_tokens(_UpperCAmelCase , _UpperCAmelCase="input_ids"):
return [batch[k].eq(_UpperCAmelCase).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(_UpperCAmelCase , k='labels')) < sum(count_pad_tokens(_UpperCAmelCase , k='labels'))
assert sum(count_pad_tokens(_UpperCAmelCase)) < sum(count_pad_tokens(_UpperCAmelCase))
assert len(_UpperCAmelCase) == len(_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase=1000 , _UpperCAmelCase=128):
'''simple docstring'''
if os.getenv('USE_REAL_DATA' , _UpperCAmelCase):
__A : Dict = 'examples/seq2seq/wmt_en_ro'
__A : Any = max_len * 2 * 64
if not Path(_UpperCAmelCase).joinpath('train.len').exists():
save_len_file(_UpperCAmelCase , _UpperCAmelCase)
else:
__A : int = 'examples/seq2seq/test_data/wmt_en_ro'
__A : Any = max_len * 4
save_len_file(_UpperCAmelCase , _UpperCAmelCase)
__A : Tuple = AutoTokenizer.from_pretrained(_UpperCAmelCase)
__A : Optional[int] = SeqaSeqDataset(
_UpperCAmelCase , data_dir=_UpperCAmelCase , type_path='train' , max_source_length=_UpperCAmelCase , max_target_length=_UpperCAmelCase , n_obs=_UpperCAmelCase , )
return ds, max_tokens, tokenizer
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A ,__A : Tuple = self._get_dataset()
__A : Optional[int] = set(DistributedSortishSampler(_UpperCAmelCase , 256 , num_replicas=2 , rank=0 , add_extra_examples=_UpperCAmelCase))
__A : List[str] = set(DistributedSortishSampler(_UpperCAmelCase , 256 , num_replicas=2 , rank=1 , add_extra_examples=_UpperCAmelCase))
assert idsa.intersection(_UpperCAmelCase) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A : Union[str, Any] = AutoTokenizer.from_pretrained(_UpperCAmelCase , use_fast=_UpperCAmelCase)
if tok_name == MBART_TINY:
__A : Dict = SeqaSeqDataset(
_UpperCAmelCase , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) , type_path='train' , max_source_length=4 , max_target_length=8 , src_lang='EN' , tgt_lang='FR' , )
__A : List[Any] = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
__A : Any = SeqaSeqDataset(
_UpperCAmelCase , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) , type_path='train' , max_source_length=4 , max_target_length=8 , )
__A : List[str] = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(_UpperCAmelCase) == 1 if tok_name == BART_TINY else len(_UpperCAmelCase) == 0 | 190 | 1 |
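# Hedged standalone sketch of the token-budget batching idea exercised by the
# dynamic-sampler test above: indices are sorted by length and grouped into
# batches whose summed token count stays under a budget. Illustrative only; the
# real sampler also shuffles and respects batch-size multiples.
def token_budget_batches(lengths, max_tokens):
    batches, cur, cur_tokens = [], [], 0
    for idx in sorted(range(len(lengths)), key=lengths.__getitem__):
        if cur and cur_tokens + lengths[idx] > max_tokens:
            batches.append(cur)
            cur, cur_tokens = [], 0
        cur.append(idx)
        cur_tokens += lengths[idx]
    if cur:
        batches.append(cur)
    return batches


# token_budget_batches([3, 1, 4, 1, 5], max_tokens=6) -> [[1, 3, 0], [2], [4]]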
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__UpperCAmelCase = logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : Dict = ['''pixel_values''']
def __init__( self , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = PILImageResampling.BICUBIC , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = True , _UpperCamelCase = 1 / 2_5_5 , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = True , **_UpperCamelCase , ) -> None:
super().__init__(**_UpperCamelCase )
UpperCAmelCase_ : str = size if size is not None else {'shortest_edge': 2_2_4}
UpperCAmelCase_ : Any = get_size_dict(_UpperCamelCase , default_to_square=_UpperCamelCase )
UpperCAmelCase_ : Dict = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
UpperCAmelCase_ : Union[str, Any] = get_size_dict(_UpperCamelCase , default_to_square=_UpperCamelCase , param_name='crop_size' )
UpperCAmelCase_ : Any = do_resize
UpperCAmelCase_ : List[str] = size
UpperCAmelCase_ : Optional[Any] = resample
UpperCAmelCase_ : Dict = do_center_crop
UpperCAmelCase_ : Union[str, Any] = crop_size
UpperCAmelCase_ : Tuple = do_rescale
UpperCAmelCase_ : Union[str, Any] = rescale_factor
UpperCAmelCase_ : Optional[int] = do_normalize
UpperCAmelCase_ : List[str] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
UpperCAmelCase_ : Dict = image_std if image_std is not None else OPENAI_CLIP_STD
UpperCAmelCase_ : Tuple = do_convert_rgb
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = PILImageResampling.BICUBIC , _UpperCamelCase = None , **_UpperCamelCase , ) -> np.ndarray:
UpperCAmelCase_ : Union[str, Any] = get_size_dict(_UpperCamelCase , default_to_square=_UpperCamelCase )
if "shortest_edge" not in size:
raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
UpperCAmelCase_ : Union[str, Any] = get_resize_output_image_size(_UpperCamelCase , size=size['shortest_edge'] , default_to_square=_UpperCamelCase )
return resize(_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ) -> np.ndarray:
UpperCAmelCase_ : str = get_size_dict(_UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
return center_crop(_UpperCamelCase , size=(size['height'], size['width']) , data_format=_UpperCamelCase , **_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ) -> Union[str, Any]:
return rescale(_UpperCamelCase , scale=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ) -> np.ndarray:
return normalize(_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = ChannelDimension.FIRST , **_UpperCamelCase , ) -> PIL.Image.Image:
UpperCAmelCase_ : Any = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ : Optional[int] = size if size is not None else self.size
UpperCAmelCase_ : Tuple = get_size_dict(_UpperCamelCase , param_name='size' , default_to_square=_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = resample if resample is not None else self.resample
UpperCAmelCase_ : Any = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ : str = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ : Any = get_size_dict(_UpperCamelCase , param_name='crop_size' , default_to_square=_UpperCamelCase )
UpperCAmelCase_ : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ : Union[str, Any] = image_std if image_std is not None else self.image_std
UpperCAmelCase_ : int = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
UpperCAmelCase_ : List[Any] = make_list_of_images(_UpperCamelCase )
if not valid_images(_UpperCamelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
UpperCAmelCase_ : Dict = [convert_to_rgb(_UpperCamelCase ) for image in images]
# All transformations expect numpy arrays.
UpperCAmelCase_ : List[str] = [to_numpy_array(_UpperCamelCase ) for image in images]
if do_resize:
UpperCAmelCase_ : Union[str, Any] = [self.resize(image=_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase ) for image in images]
if do_center_crop:
UpperCAmelCase_ : int = [self.center_crop(image=_UpperCamelCase , size=_UpperCamelCase ) for image in images]
if do_rescale:
UpperCAmelCase_ : List[Any] = [self.rescale(image=_UpperCamelCase , scale=_UpperCamelCase ) for image in images]
if do_normalize:
UpperCAmelCase_ : Optional[Any] = [self.normalize(image=_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase ) for image in images]
UpperCAmelCase_ : Optional[Any] = [to_channel_dimension_format(_UpperCamelCase , _UpperCamelCase ) for image in images]
UpperCAmelCase_ : Tuple = {'pixel_values': images}
return BatchFeature(data=_UpperCamelCase , tensor_type=_UpperCamelCase )
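# Hedged standalone sketch of the pipeline the `preprocess` method above applies
# (resize -> center crop -> rescale -> normalize -> channels-first), using the
# functional transforms imported at the top of this file on a random array that
# stands in for an image. Sizes and exact signatures are assumptions here.
_demo_img = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)
_demo_img = resize(_demo_img, size=get_resize_output_image_size(_demo_img, size=224, default_to_square=False))
_demo_img = center_crop(_demo_img, size=(224, 224))
_demo_img = rescale(_demo_img, scale=1 / 255)
_demo_img = normalize(_demo_img, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD)
_demo_img = to_channel_dimension_format(_demo_img, ChannelDimension.FIRST)
assert _demo_img.shape == (3, 224, 224)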
| 29 |
"""simple docstring"""
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

REVISION_ID_DEFAULT = "main"
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = "aaaaaaa"
# This commit does not exist, so we should 404.

PINNED_SHA1 = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"


@contextlib.contextmanager
def context_en():
    print("Welcome!")
    yield
    print("Bye!")


@contextlib.contextmanager
def context_fr():
    print("Bonjour!")
    yield
    print("Au revoir!")


class TestImportMechanisms(unittest.TestCase):
    def test_module_spec_available(self):
        # If the spec is missing, importlib would not be able to import the module dynamically.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec("transformers") is not None


class GenericUtilTests(unittest.TestCase):
    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
        with ContextManagers([]):
            print("Transformers are awesome!")
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), "Transformers are awesome!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_one_context(self, mock_stdout):
        with ContextManagers([context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Welcome!\nTransformers are awesome!\nBye!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_two_context(self, mock_stdout):
        with ContextManagers([context_fr(), context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n")

    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])

        # find_labels should also work on user-defined subclasses
        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(TFBertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ["start_positions", "end_positions"])

        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
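# --- Added illustrative sketch (not part of the original file) ---------------
# `find_labels` works by inspecting the model class's call signature and
# keeping the label-like parameters. Below is a simplified re-implementation
# for plain PyTorch-style classes; the real helper also inspects TF's `call`
# and Flax's `__call__`, and special-cases QuestionAnswering models (whose
# labels are `start_positions`/`end_positions`).
import inspect


def find_labels_sketch(model_class):
    signature = inspect.signature(model_class.forward)
    return [p for p in signature.parameters if "label" in p]


class ToyModel:
    def forward(self, input_ids, attention_mask=None, labels=None):
        ...


assert find_labels_sketch(ToyModel) == ["labels"]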
| 144 | 0 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__A = logging.get_logger(__name__)
__A = '▁'
__A = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
'tokenizer_config_file': 'tokenizer_config.json',
}
__A = {
'vocab_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json',
},
'spm_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_config_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json',
},
}
__A = {
'facebook/m2m100_418M': 1024,
}
# fmt: off
__A = {
'm2m100': ['af', 'am', 'ar', 'ast', 'az', 'ba', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'ceb', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'ff', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'ht', 'hu', 'hy', 'id', 'ig', 'ilo', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'lb', 'lg', 'ln', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'ns', 'oc', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'ss', 'su', 'sv', 'sw', 'ta', 'th', 'tl', 'tn', 'tr', 'uk', 'ur', 'uz', 'vi', 'wo', 'xh', 'yi', 'yo', 'zh', 'zu'],
'wmt21': ['en', 'ha', 'is', 'ja', 'cs', 'ru', 'zh', 'de']
}
class SCREAMING_SNAKE_CASE ( snake_case ):
"""simple docstring"""
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = ["input_ids", "attention_mask"]
A_ = []
A_ = []
def __init__( self: Any , __A: int , __A: List[str] , __A: str=None , __A: Optional[Any]=None , __A: int="<s>" , __A: Dict="</s>" , __A: Tuple="</s>" , __A: List[str]="<pad>" , __A: List[Any]="<unk>" , __A: Any="m2m100" , __A: Optional[int] = None , __A: List[str]=8 , **__A: Any , ) -> None:
_A = {} if sp_model_kwargs is None else sp_model_kwargs
_A = language_codes
_A = FAIRSEQ_LANGUAGE_CODES[language_codes]
_A = {lang_code: f"""__{lang_code}__""" for lang_code in fairseq_language_code}
_A = kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(__A )
for lang_code in fairseq_language_code
if self.get_lang_token(__A ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=__A , tgt_lang=__A , bos_token=__A , eos_token=__A , sep_token=__A , unk_token=__A , pad_token=__A , language_codes=__A , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=__A , **__A , )
_A = vocab_file
_A = load_json(__A )
_A = {v: k for k, v in self.encoder.items()}
_A = spm_file
_A = load_spm(__A , self.sp_model_kwargs )
_A = len(self.encoder )
_A = {
self.get_lang_token(__A ): self.encoder_size + i for i, lang_code in enumerate(__A )
}
_A = {lang_code: self.encoder_size + i for i, lang_code in enumerate(__A )}
_A = {v: k for k, v in self.lang_token_to_id.items()}
_A = src_lang if src_lang is not None else '''en'''
_A = tgt_lang
_A = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
_A = num_madeup_words
@property
def __A ( self: List[str] ) -> int:
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def __A ( self: int ) -> str:
return self._src_lang
@src_lang.setter
def __A ( self: int , __A: List[str] ) -> None:
_A = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __A ( self: int , __A: Optional[int] ) -> List[str]:
return self.sp_model.encode(__A , out_type=__A )
def __A ( self: Union[str, Any] , __A: Any ) -> List[str]:
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(__A , self.encoder[self.unk_token] )
def __A ( self: str , __A: Tuple ) -> str:
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(__A , self.unk_token )
def __A ( self: List[Any] , __A: Union[str, Any] ) -> List[Any]:
_A = []
_A = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__A ) + token
_A = []
else:
current_sub_tokens.append(__A )
out_string += self.sp_model.decode(__A )
return out_string.strip()
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
def __A ( self: Optional[Any] ) -> Dict:
_A = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self: Optional[int] ) -> Dict:
_A = self.__dict__.copy()
_A = None
return state
def __setstate__( self: List[str] , __A: Optional[int] ) -> None:
_A = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_A = {}
_A = load_spm(self.spm_file , self.sp_model_kwargs )
def __A ( self: List[str] , __A: int , __A: Tuple = None ) -> Tuple[str]:
_A = Path(__A )
if not save_dir.is_dir():
raise OSError(f"""{save_directory} should be a directory""" )
_A = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
)
_A = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
)
save_json(self.encoder , __A )
if os.path.abspath(self.spm_file ) != os.path.abspath(__A ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , __A )
elif not os.path.isfile(self.spm_file ):
with open(__A , '''wb''' ) as fi:
_A = self.sp_model.serialized_model_proto()
fi.write(__A )
return (str(__A ), str(__A ))
def __A ( self: Union[str, Any] , __A: Union[str, Any] , __A: Optional[Any] = "en" , __A: Optional[Any] = None , __A: List[str] = "ro" , **__A: str , ) -> BatchEncoding:
_A = src_lang
_A = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(__A , __A , **__A )
def __A ( self: str , __A: Any , __A: Tuple , __A: str , **__A: List[Any] ) -> Any:
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
_A = src_lang
_A = self(__A , add_special_tokens=__A , **__A )
_A = self.get_lang_id(__A )
_A = tgt_lang_id
return inputs
def __A ( self: Optional[Any] ) -> List[str]:
self.set_src_lang_special_tokens(self.src_lang )
def __A ( self: Any ) -> List[str]:
self.set_tgt_lang_special_tokens(self.tgt_lang )
def __A ( self: Tuple , __A: int ) -> None:
_A = self.get_lang_token(__A )
_A = self.lang_token_to_id[lang_token]
_A = [self.cur_lang_id]
_A = [self.eos_token_id]
def __A ( self: Optional[int] , __A: str ) -> None:
_A = self.get_lang_token(__A )
_A = self.lang_token_to_id[lang_token]
_A = [self.cur_lang_id]
_A = [self.eos_token_id]
def __A ( self: Tuple , __A: List[str] ) -> str:
return self.lang_code_to_token[lang]
def __A ( self: List[Any] , __A: Tuple ) -> int:
_A = self.get_lang_token(__A )
return self.lang_token_to_id[lang_token]
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
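# --- Added illustrative sketch (not part of the original file) ---------------
# Typical translation usage of the tokenizer above. Downloading the checkpoint
# requires network access, so it is shown as comments rather than executed:
#
# from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer
#
# tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
# model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
# inputs = tokenizer("Hello, how are you?", return_tensors="pt")
# generated = model.generate(**inputs, forced_bos_token_id=tokenizer.get_lang_id("fr"))
# print(tokenizer.batch_decode(generated, skip_special_tokens=True))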
| 352 |
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
__A = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( snake_case ):
"""simple docstring"""
def __init__( self: List[Any] , *__A: Union[str, Any] , **__A: Optional[Any] ) -> None:
warnings.warn(
'''The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use DPTImageProcessor instead.''' , __A , )
super().__init__(*__A , **__A )
| 75 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
__a = tempfile.mkdtemp()
# fmt: off
__a = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest"""]
# fmt: on
__a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
__a = {
"""do_resize""": True,
"""size""": {"""height""": 18, """width""": 18},
"""do_normalize""": True,
"""image_mean""": [0.5, 0.5, 0.5],
"""image_std""": [0.5, 0.5, 0.5],
}
__a = os.path.join(self.tmpdirname , __lowercase )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(__lowercase , __lowercase )
def UpperCamelCase_ ( self : Dict , **__lowercase : List[Any] ):
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **__lowercase )
def UpperCamelCase_ ( self : Optional[int] , **__lowercase : str ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__lowercase )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
__a = self.get_tokenizer()
__a = self.get_image_processor()
__a = VisionTextDualEncoderProcessor(tokenizer=__lowercase , image_processor=__lowercase )
processor.save_pretrained(self.tmpdirname )
__a = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowercase )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
__a = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__a = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
__a = self.get_image_processor(do_normalize=__lowercase , padding_value=1.0 )
__a = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__lowercase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowercase )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
__a = self.get_image_processor()
__a = self.get_tokenizer()
__a = VisionTextDualEncoderProcessor(tokenizer=__lowercase , image_processor=__lowercase )
__a = self.prepare_image_inputs()
__a = image_processor(__lowercase , return_tensors="""np""" )
__a = processor(images=__lowercase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
__a = self.get_image_processor()
__a = self.get_tokenizer()
__a = VisionTextDualEncoderProcessor(tokenizer=__lowercase , image_processor=__lowercase )
__a = """lower newer"""
__a = processor(text=__lowercase )
__a = tokenizer(__lowercase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
__a = self.get_image_processor()
__a = self.get_tokenizer()
__a = VisionTextDualEncoderProcessor(tokenizer=__lowercase , image_processor=__lowercase )
__a = """lower newer"""
__a = self.prepare_image_inputs()
__a = processor(text=__lowercase , images=__lowercase )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with self.assertRaises(__lowercase ):
processor()
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
__a = self.get_image_processor()
__a = self.get_tokenizer()
__a = VisionTextDualEncoderProcessor(tokenizer=__lowercase , image_processor=__lowercase )
__a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__a = processor.batch_decode(__lowercase )
__a = tokenizer.batch_decode(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
__a = self.get_image_processor()
__a = self.get_tokenizer()
__a = VisionTextDualEncoderProcessor(tokenizer=__lowercase , image_processor=__lowercase )
__a = """lower newer"""
__a = self.prepare_image_inputs()
__a = processor(text=__lowercase , images=__lowercase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
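# --- Added illustrative sketch (not part of the original file) ---------------
# How the processor under test is typically used: it routes text to the
# tokenizer and images to the image processor, then merges the outputs.
# Shown as comments because it needs real PIL images and torch at runtime;
# `tokenizer`, `image_processor` and `pil_image` are hypothetical stand-ins.
#
# processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
# batch = processor(text=["a photo of a cat"], images=[pil_image], return_tensors="pt", padding=True)
# # `batch` now holds input_ids / attention_mask from the tokenizer and
# # pixel_values from the image processor, ready for a dual-encoder model.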
| 302 |
import tempfile
import unittest

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
    is_torch_available,
    require_optimum,
    require_torch,
    slow,
)


if is_torch_available():
    import torch


@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        inp = tokenizer("This is me", return_tensors="pt")

        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
        output = model.generate(**inp)

        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )
            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        # A transformed model cannot be saved until it is reversed.
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)
            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
| 302 | 1 |
'''simple docstring'''
lowerCamelCase__ = {
"joule": 1.0,
"kilojoule": 1_000,
"megajoule": 1_000_000,
"gigajoule": 1_000_000_000,
"wattsecond": 1.0,
"watthour": 3_600,
"kilowatthour": 3_600_000,
"newtonmeter": 1.0,
"calorie_nutr": 4_186.8,
"kilocalorie_nutr": 4_186_800.00,
"electronvolt": 1.602176634e-19,
"britishthermalunit_it": 1_055.05_585,
"footpound": 1.355_818,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
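# --- Added illustrative examples (not part of the original file) -------------
# With the factor table above, conversion is value * factor(from) / factor(to):
assert energy_conversion("joule", "kilojoule", 1) == 1e-3
assert energy_conversion("kilowatthour", "joule", 1) == 3_600_000.0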
| 322 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
lowerCamelCase__ = list[list[float | int]]
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : int = len(__lowerCAmelCase )
_UpperCAmelCase : Matrix = [[0 for _ in range(size + 1 )] for _ in range(__lowerCAmelCase )]
_UpperCAmelCase : int
_UpperCAmelCase : int
_UpperCAmelCase : int
_UpperCAmelCase : int
_UpperCAmelCase : int
_UpperCAmelCase : float
for row in range(__lowerCAmelCase ):
for col in range(__lowerCAmelCase ):
_UpperCAmelCase : Optional[Any] = matrix[row][col]
_UpperCAmelCase : Optional[int] = vector[row][0]
_UpperCAmelCase : int = 0
_UpperCAmelCase : Union[str, Any] = 0
while row < size and col < size:
# pivoting
_UpperCAmelCase : Optional[Any] = max((abs(augmented[rowa][col] ), rowa) for rowa in range(__lowerCAmelCase , __lowerCAmelCase ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
_UpperCAmelCase , _UpperCAmelCase : str = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , __lowerCAmelCase ):
_UpperCAmelCase : Dict = augmented[rowa][col] / augmented[row][col]
_UpperCAmelCase : Optional[Any] = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , __lowerCAmelCase ):
for row in range(__lowerCAmelCase ):
_UpperCAmelCase : Dict = augmented[row][col] / augmented[col][col]
for cola in range(__lowerCAmelCase , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(__lowerCAmelCase )
]
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : int = len(__lowerCAmelCase )
_UpperCAmelCase : Matrix = [[0 for _ in range(__lowerCAmelCase )] for _ in range(__lowerCAmelCase )]
_UpperCAmelCase : Matrix = [[0] for _ in range(__lowerCAmelCase )]
_UpperCAmelCase : Matrix
_UpperCAmelCase : int
_UpperCAmelCase : int
_UpperCAmelCase : int
for x_val, y_val in enumerate(__lowerCAmelCase ):
for col in range(__lowerCAmelCase ):
_UpperCAmelCase : Dict = (x_val + 1) ** (size - col - 1)
_UpperCAmelCase : int = y_val
_UpperCAmelCase : List[str] = solve(__lowerCAmelCase , __lowerCAmelCase )
def interpolated_func(__lowerCAmelCase ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(__lowerCAmelCase ) )
return interpolated_func
def __lowerCAmelCase (__lowerCAmelCase ):
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def __lowerCAmelCase (__lowerCAmelCase = question_function , __lowerCAmelCase = 10 ):
_UpperCAmelCase : list[int] = [func(__lowerCAmelCase ) for x_val in range(1 , order + 1 )]
_UpperCAmelCase : list[Callable[[int], int]] = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
_UpperCAmelCase : int = 0
_UpperCAmelCase : Callable[[int], int]
_UpperCAmelCase : int
for poly in polynomials:
_UpperCAmelCase : int = 1
while func(__lowerCAmelCase ) == poly(__lowerCAmelCase ):
x_val += 1
ret += poly(__lowerCAmelCase )
return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
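# --- Added sanity check (not part of the original file) ----------------------
# Standalone verification of the generating polynomial from the Project Euler
# 101 statement: u(n) = 1 - n + n^2 - ... + n^10 gives u(1) = 1 and u(2) = 683.
u = lambda n: sum((-n) ** k for k in range(11))
assert u(1) == 1 and u(2) == 683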
| 322 | 1 |
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = "\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
_DESCRIPTION = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n"
_KWARGS_DESCRIPTION = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n 'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n 'num_edits' (int): The cumulative number of edits\n 'ref_length' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Ter(datasets.Metric):
    """TER (Translation Edit Rate) metric, backed by sacrebleu."""

    def _info(self):
if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[
"https://github.com/jhclark/tercom",
] , )
    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 123 |
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
_snake_case : Any = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class a (_lowerCAmelCase ):
"""simple docstring"""
def __init__( self : Optional[Any] , lowerCamelCase : int = 101 ) -> Dict:
__snake_case : str = length
def __len__( self : Optional[Any] ) -> Any:
return self.length
def __getitem__( self : int , lowerCamelCase : Optional[Any] ) -> int:
return i
class a :
"""simple docstring"""
def __call__( self : List[Any] , lowerCamelCase : Any ) -> Tuple:
return {"input_ids": torch.tensor(lowerCamelCase ), "labels": torch.tensor(lowerCamelCase )}
class a (nn.Module ):
"""simple docstring"""
def __init__( self : Optional[Any] ) -> str:
super().__init__()
# Add some (unused) params otherwise DDP will complain.
__snake_case : Optional[Any] = nn.Linear(120 , 80 )
def __snake_case ( self : int , lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any]=None ) -> Optional[Any]:
if labels is not None:
return torch.tensor(0.0 , device=input_ids.device ), input_ids
else:
return input_ids
class a (_lowerCAmelCase ):
"""simple docstring"""
@require_torch_neuroncore
def __snake_case ( self : Union[str, Any] ) -> Optional[Any]:
__snake_case : Dict = F'--nproc_per_node=2\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n '.split()
__snake_case : Optional[int] = self.get_auto_remove_tmp_dir()
__snake_case : int = F'--output_dir {output_dir}'.split()
__snake_case : str = ["torchrun"] + distributed_args + args
execute_subprocess_async(lowerCamelCase , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
class a (_lowerCAmelCase ):
"""simple docstring"""
@require_torch_multi_gpu
def __snake_case ( self : str ) -> int:
__snake_case : List[str] = F'--nproc_per_node={torch.cuda.device_count()}\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n '.split()
__snake_case : Optional[int] = self.get_auto_remove_tmp_dir()
__snake_case : int = F'--output_dir {output_dir}'.split()
__snake_case : Optional[int] = ["torchrun"] + distributed_args + args
execute_subprocess_async(lowerCamelCase , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
    )

    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
| 123 | 1 |
'''simple docstring'''
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class _SCREAMING_SNAKE_CASE ( __a ,__a ):
__SCREAMING_SNAKE_CASE :Optional[Any] = 1
@register_to_config
def __init__( self : Optional[int] , a__ : int = 1000 , a__ : Optional[Union[np.ndarray, List[float]]] = None ):
# set `betas`, `alphas`, `timesteps`
self.set_timesteps(a__ )
# standard deviation of the initial noise distribution
__magic_name__ = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
__magic_name__ = 4
# running values
__magic_name__ = []
def snake_case__ ( self : Dict , a__ : int , a__ : Union[str, torch.device] = None ):
__magic_name__ = num_inference_steps
__magic_name__ = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
__magic_name__ = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
            __magic_name__ = torch.tensor(self.config.trained_betas , dtype=torch.float32 )
else:
__magic_name__ = torch.sin(steps * math.pi / 2 ) ** 2
__magic_name__ = (1.0 - self.betas**2) ** 0.5
        __magic_name__ = (torch.atan2(self.betas , self.alphas ) / math.pi * 2)[:-1]
__magic_name__ = timesteps.to(a__ )
__magic_name__ = []
def snake_case__ ( self : Union[str, Any] , a__ : torch.FloatTensor , a__ : int , a__ : torch.FloatTensor , a__ : bool = True , ):
if self.num_inference_steps is None:
raise ValueError(
'''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' )
__magic_name__ = (self.timesteps == timestep).nonzero().item()
__magic_name__ = timestep_index + 1
__magic_name__ = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(a__ )
if len(self.ets ) == 1:
__magic_name__ = self.ets[-1]
elif len(self.ets ) == 2:
__magic_name__ = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
__magic_name__ = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
__magic_name__ = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
__magic_name__ = self._get_prev_sample(a__ , a__ , a__ , a__ )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=a__ )
def snake_case__ ( self : Any , a__ : torch.FloatTensor , *a__ : Optional[int] , **a__ : Union[str, Any] ):
return sample
def snake_case__ ( self : str , a__ : Optional[int] , a__ : int , a__ : List[Any] , a__ : List[str] ):
__magic_name__ = self.alphas[timestep_index]
__magic_name__ = self.betas[timestep_index]
__magic_name__ = self.alphas[prev_timestep_index]
__magic_name__ = self.betas[prev_timestep_index]
__magic_name__ = (sample - sigma * ets) / max(a__ , 1E-8 )
__magic_name__ = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self : int ):
return self.config.num_train_timesteps
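# --- Added illustrative sketch (not part of the original file) ---------------
# Call order for a scheduler like the one above (IPNDMScheduler in upstream
# diffusers); `model` is a hypothetical stand-in for a trained denoiser.
#
# scheduler.set_timesteps(num_inference_steps=50)
# sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma  # init sigma is 1.0 here
# for t in scheduler.timesteps:
#     model_output = model(sample, t)
#     sample = scheduler.step(model_output, t, sample).prev_sample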
| 98 |
'''simple docstring'''
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a code snippet, or None if it is too short."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    """Tokenize a code snippet on non-alphanumeric characters."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return
        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash, ThreadedIterator(dataset_iterator, max_queue_size=1_0000), chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator, jaccard_threshold: float):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(dataset, jaccard_threshold: float = 0.85) -> Tuple[Type[Dataset], List[List[Dict]]]:
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
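# --- Added illustrative sketch (not part of the original file) ---------------
# Toy end-to-end run of the deduplication above; it needs `datasets`,
# `datasketch` and `dpu_utils` installed, so it is shown as comments. Note
# that snippets with fewer than MIN_NUM_TOKENS tokens are skipped by
# `get_min_hash`, so only sufficiently long near-duplicates form clusters.
#
# from datasets import Dataset
# ds = Dataset.from_dict({
#     "content": [some_long_duplicated_source] * 3 + [some_other_source],  # hypothetical strings
#     "repo_name": ["r0", "r1", "r2", "r3"],
#     "path": ["a.py", "b.py", "c.py", "d.py"],
# })
# ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)
# # The duplicated files collapse into one cluster kept as a single "extreme".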
| 98 | 1 |
"""simple docstring"""
import numpy as np
import qiskit
def bbaa(key_len: int = 8, seed=None) -> str:
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bbaa_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_state):
        if alice_state[index] == 1:
            bbaa_circ.x(index)
        if alice_basis[index] == 1:
            bbaa_circ.h(index)
    bbaa_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bbaa_circ.h(index)

    bbaa_circ.barrier()
    bbaa_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bbaa_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bbaa_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key
if __name__ == "__main__":
print(F"The generated key is : {bbaa(8, seed=0)}")
from doctest import testmod
    testmod()
 | 96 |
"""simple docstring"""
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
__A : str = sys.version_info >= (3, 10)
def lowercase ( _SCREAMING_SNAKE_CASE : Tuple=None , _SCREAMING_SNAKE_CASE : Tuple=None ):
'''simple docstring'''
return field(default_factory=lambda: default , metadata=_SCREAMING_SNAKE_CASE )
@dataclass
class _a :
"""simple docstring"""
UpperCamelCase__ = 42
UpperCamelCase__ = 42
UpperCamelCase__ = 42
UpperCamelCase__ = 42
@dataclass
class _a :
"""simple docstring"""
UpperCamelCase__ = 42
UpperCamelCase__ = field(default="""toto""" , metadata={"""help""": """help message"""})
@dataclass
class _a :
"""simple docstring"""
UpperCamelCase__ = False
UpperCamelCase__ = True
UpperCamelCase__ = None
class _a ( lowerCAmelCase):
"""simple docstring"""
UpperCamelCase__ = """titi"""
UpperCamelCase__ = """toto"""
class _a ( lowerCAmelCase):
"""simple docstring"""
UpperCamelCase__ = """titi"""
UpperCamelCase__ = """toto"""
UpperCamelCase__ = 42
@dataclass
class _a :
"""simple docstring"""
UpperCamelCase__ = "toto"
def lowercase__ ( self : Tuple )->Optional[int]:
_UpperCAmelCase = BasicEnum(self.foo )
@dataclass
class _a :
"""simple docstring"""
UpperCamelCase__ = "toto"
def lowercase__ ( self : List[str] )->List[Any]:
_UpperCAmelCase = MixedTypeEnum(self.foo )
@dataclass
class _a :
"""simple docstring"""
UpperCamelCase__ = None
UpperCamelCase__ = field(default=lowerCAmelCase , metadata={"""help""": """help message"""})
UpperCamelCase__ = None
UpperCamelCase__ = list_field(default=[])
UpperCamelCase__ = list_field(default=[])
@dataclass
class _a :
"""simple docstring"""
UpperCamelCase__ = list_field(default=[])
UpperCamelCase__ = list_field(default=[1, 2, 3])
UpperCamelCase__ = list_field(default=["""Hallo""", """Bonjour""", """Hello"""])
UpperCamelCase__ = list_field(default=[0.1, 0.2, 0.3])
@dataclass
class _a :
"""simple docstring"""
UpperCamelCase__ = field()
UpperCamelCase__ = field()
UpperCamelCase__ = field()
def lowercase__ ( self : int )->str:
_UpperCAmelCase = BasicEnum(self.required_enum )
@dataclass
class _a :
"""simple docstring"""
UpperCamelCase__ = 42
UpperCamelCase__ = field()
UpperCamelCase__ = None
UpperCamelCase__ = field(default="""toto""" , metadata={"""help""": """help message"""})
UpperCamelCase__ = list_field(default=["""Hallo""", """Bonjour""", """Hello"""])
if is_python_no_less_than_3_10:
@dataclass
class _a :
"""simple docstring"""
UpperCamelCase__ = False
UpperCamelCase__ = True
UpperCamelCase__ = None
@dataclass
class _a :
"""simple docstring"""
UpperCamelCase__ = None
UpperCamelCase__ = field(default=lowerCAmelCase , metadata={"""help""": """help message"""})
UpperCamelCase__ = None
UpperCamelCase__ = list_field(default=[])
UpperCamelCase__ = list_field(default=[])
class _a ( unittest.TestCase):
"""simple docstring"""
def lowercase__ ( self : int , __UpperCamelCase : argparse.ArgumentParser , __UpperCamelCase : argparse.ArgumentParser )->Dict:
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
_UpperCAmelCase = {k: v for k, v in vars(__UpperCamelCase ).items() if k != '''container'''}
_UpperCAmelCase = {k: v for k, v in vars(__UpperCamelCase ).items() if k != '''container'''}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('''choices''' , __UpperCamelCase ) and yy.get('''choices''' , __UpperCamelCase ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['''type'''](__UpperCamelCase ) , yy['''type'''](__UpperCamelCase ) )
del xx["type"], yy["type"]
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def lowercase__ ( self : int )->str:
_UpperCAmelCase = HfArgumentParser(__UpperCamelCase )
_UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=__UpperCamelCase , required=__UpperCamelCase )
expected.add_argument('''--bar''' , type=__UpperCamelCase , required=__UpperCamelCase )
expected.add_argument('''--baz''' , type=__UpperCamelCase , required=__UpperCamelCase )
expected.add_argument('''--flag''' , type=__UpperCamelCase , default=__UpperCamelCase , const=__UpperCamelCase , nargs='''?''' )
self.argparsersEqual(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5''']
((_UpperCAmelCase) , ) = parser.parse_args_into_dataclasses(__UpperCamelCase , look_for_args_file=__UpperCamelCase )
self.assertFalse(example.flag )
def lowercase__ ( self : Dict )->List[Any]:
_UpperCAmelCase = HfArgumentParser(__UpperCamelCase )
_UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=4_2 , type=__UpperCamelCase )
expected.add_argument('''--baz''' , default='''toto''' , type=__UpperCamelCase , help='''help message''' )
self.argparsersEqual(__UpperCamelCase , __UpperCamelCase )
def lowercase__ ( self : Tuple )->List[str]:
_UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=__UpperCamelCase , default=__UpperCamelCase , const=__UpperCamelCase , nargs='''?''' )
expected.add_argument('''--baz''' , type=__UpperCamelCase , default=__UpperCamelCase , const=__UpperCamelCase , nargs='''?''' )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('''--no_baz''' , action='''store_false''' , default=__UpperCamelCase , dest='''baz''' )
expected.add_argument('''--opt''' , type=__UpperCamelCase , default=__UpperCamelCase )
_UpperCAmelCase = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__UpperCamelCase )
for dataclass_type in dataclass_types:
_UpperCAmelCase = HfArgumentParser(__UpperCamelCase )
self.argparsersEqual(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = parser.parse_args([] )
self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) )
_UpperCAmelCase = parser.parse_args(['''--foo''', '''--no_baz'''] )
self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) )
_UpperCAmelCase = parser.parse_args(['''--foo''', '''--baz'''] )
self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) )
_UpperCAmelCase = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] )
self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) )
_UpperCAmelCase = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] )
self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) )
def lowercase__ ( self : Optional[Any] )->str:
_UpperCAmelCase = HfArgumentParser(__UpperCamelCase )
_UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 4_2] , type=make_choice_type_function(['''titi''', '''toto''', 4_2] ) , )
self.argparsersEqual(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
_UpperCAmelCase = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
_UpperCAmelCase = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
_UpperCAmelCase = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
_UpperCAmelCase = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 4_2 )
_UpperCAmelCase = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def lowercase__ ( self : List[str] )->List[str]:
@dataclass
class _a :
"""simple docstring"""
UpperCamelCase__ = "toto"
_UpperCAmelCase = HfArgumentParser(__UpperCamelCase )
_UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 4_2) , type=make_choice_type_function(['''titi''', '''toto''', 4_2] ) , )
self.argparsersEqual(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
_UpperCAmelCase = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
_UpperCAmelCase = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 4_2 )
def lowercase__ ( self : int )->int:
_UpperCAmelCase = HfArgumentParser(__UpperCamelCase )
_UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=__UpperCamelCase )
expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=__UpperCamelCase )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=__UpperCamelCase )
expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=__UpperCamelCase )
self.argparsersEqual(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = parser.parse_args([] )
self.assertEqual(
__UpperCamelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , )
_UpperCAmelCase = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() )
self.assertEqual(__UpperCamelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) )
def lowercase__ ( self : Union[str, Any] )->Tuple:
_UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=__UpperCamelCase , type=__UpperCamelCase )
expected.add_argument('''--bar''' , default=__UpperCamelCase , type=__UpperCamelCase , help='''help message''' )
expected.add_argument('''--baz''' , default=__UpperCamelCase , type=__UpperCamelCase )
expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=__UpperCamelCase )
expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=__UpperCamelCase )
_UpperCAmelCase = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__UpperCamelCase )
for dataclass_type in dataclass_types:
_UpperCAmelCase = HfArgumentParser(__UpperCamelCase )
self.argparsersEqual(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = parser.parse_args([] )
self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , bar=__UpperCamelCase , baz=__UpperCamelCase , ces=[] , des=[] ) )
_UpperCAmelCase = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() )
self.assertEqual(__UpperCamelCase , Namespace(foo=1_2 , bar=3.1_4 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) )
def lowercase__ ( self : Any )->int:
_UpperCAmelCase = HfArgumentParser(__UpperCamelCase )
_UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--required_list''' , nargs='''+''' , type=__UpperCamelCase , required=__UpperCamelCase )
expected.add_argument('''--required_str''' , type=__UpperCamelCase , required=__UpperCamelCase )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=__UpperCamelCase , )
self.argparsersEqual(__UpperCamelCase , __UpperCamelCase )
def lowercase__ ( self : str )->List[Any]:
_UpperCAmelCase = HfArgumentParser(__UpperCamelCase )
_UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=__UpperCamelCase , required=__UpperCamelCase )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=__UpperCamelCase , )
expected.add_argument('''--opt''' , type=__UpperCamelCase , default=__UpperCamelCase )
expected.add_argument('''--baz''' , default='''toto''' , type=__UpperCamelCase , help='''help message''' )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=__UpperCamelCase )
self.argparsersEqual(__UpperCamelCase , __UpperCamelCase )
def lowercase__ ( self : Optional[Any] )->Optional[int]:
_UpperCAmelCase = HfArgumentParser(__UpperCamelCase )
_UpperCAmelCase = {
'''foo''': 1_2,
'''bar''': 3.1_4,
'''baz''': '''42''',
'''flag''': True,
}
_UpperCAmelCase = parser.parse_dict(__UpperCamelCase )[0]
_UpperCAmelCase = BasicExample(**__UpperCamelCase )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def lowercase__ ( self : Union[str, Any] )->List[str]:
_UpperCAmelCase = HfArgumentParser(__UpperCamelCase )
_UpperCAmelCase = {
'''foo''': 1_2,
'''bar''': 3.1_4,
'''baz''': '''42''',
'''flag''': True,
'''extra''': 4_2,
}
self.assertRaises(__UpperCamelCase , parser.parse_dict , __UpperCamelCase , allow_extra_keys=__UpperCamelCase )
def lowercase__ ( self : Optional[Any] )->Optional[int]:
_UpperCAmelCase = HfArgumentParser(__UpperCamelCase )
_UpperCAmelCase = {
'''foo''': 1_2,
'''bar''': 3.1_4,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase = os.path.join(__UpperCamelCase , '''temp_json''' )
os.mkdir(__UpperCamelCase )
with open(temp_local_path + '''.json''' , '''w+''' ) as f:
json.dump(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = parser.parse_yaml_file(Path(temp_local_path + '''.json''' ) )[0]
_UpperCAmelCase = BasicExample(**__UpperCamelCase )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def lowercase__ ( self : Union[str, Any] )->Any:
_UpperCAmelCase = HfArgumentParser(__UpperCamelCase )
_UpperCAmelCase = {
'''foo''': 1_2,
'''bar''': 3.1_4,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase = os.path.join(__UpperCamelCase , '''temp_yaml''' )
os.mkdir(__UpperCamelCase )
with open(temp_local_path + '''.yaml''' , '''w+''' ) as f:
yaml.dump(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0]
_UpperCAmelCase = BasicExample(**__UpperCamelCase )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def lowercase__ ( self : int )->List[str]:
_UpperCAmelCase = HfArgumentParser(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
| 260 | 0 |
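A minimal end-to-end sketch of the HfArgumentParser API exercised in the tests above. The dataclass and its fields are invented for illustration; only the parser calls come from the tested API.

from dataclasses import dataclass, field

from transformers import HfArgumentParser


@dataclass
class TrainArgs:  # hypothetical dataclass, not part of the test suite
    model_name: str = field(default="bert-base-uncased", metadata={"help": "checkpoint to load"})
    lr: float = 3e-5
    fp16: bool = False


parser = HfArgumentParser(TrainArgs)
# bools become optional flags, floats/strings become typed arguments
(train_args,) = parser.parse_args_into_dataclasses(["--lr", "1e-4", "--fp16"])
print(train_args.lr, train_args.fp16)  # 0.0001 True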
from __future__ import annotations


def make_matrix(row_size: int = 4) -> list[list[int]]:
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for i in matrix:
        print(*i)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
| 221 |
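A quick sanity check for the rotation helpers above: four quarter-turns (or two half-turns) must reproduce the original matrix.

# Rotating 90 degrees counterclockwise four times is the identity,
# and so is rotating 180 degrees twice.
m = make_matrix(3)
r = m
for _ in range(4):
    r = rotate_90(r)
assert r == m
assert rotate_180(rotate_180(m)) == m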
from math import pi, sqrt


def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        # Gamma(1/2) = sqrt(pi), the base case of the half-integer recursion
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
| 221 | 1 |
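A cross-check of the recursive half-integer gamma above against the standard library implementation, allowing for floating-point error.

import math

for n in (1, 2, 5, 0.5, 2.5, 7.5):
    assert math.isclose(gamma(n), math.gamma(n), rel_tol=1e-9)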
from pickle import UnpicklingError

import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict

from ..utils import logging


logger = logging.get_logger(__name__)


def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    try:
        with open(model_file, "rb") as flax_state_f:
            flax_state = from_bytes(pt_model, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned."
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(pt_model, flax_state)


def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    pt_model.base_model_prefix = ""

    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")

        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]

        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )

        flax_key = ".".join(flax_key_tuple_array)

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )

    return pt_model
| 99 |
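The key/shape rules applied above, replayed on a toy flax-style dict. The parameter names here are made up for illustration: dense kernels are transposed, 4-D conv kernels go HWIO to OIHW, and layer-norm "scale" becomes "weight".

import numpy as np

flax_params = {
    "dense.kernel": np.zeros((4, 8)),       # dense kernel: (in, out) -> transpose to (out, in)
    "conv.kernel": np.zeros((3, 3, 2, 5)),  # conv kernel: HWIO -> OIHW
    "norm.scale": np.zeros((8,)),           # layer-norm scale -> weight
}
pt_params = {}
for key, value in flax_params.items():
    prefix, leaf = key.rsplit(".", 1)
    if leaf == "kernel" and value.ndim == 4:
        value = value.transpose(3, 2, 0, 1)
    elif leaf == "kernel":
        value = value.T
    new_key = (prefix + ".weight") if leaf in ("kernel", "scale") else key
    pt_params[new_key] = value

print({k: v.shape for k, v in pt_params.items()})
# {'dense.weight': (8, 4), 'conv.weight': (5, 2, 3, 3), 'norm.weight': (8,)}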
import argparse
import json

from tqdm import tqdm


def main():
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")


if __name__ == "__main__":
    main()
| 70 | 0 |
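On a made-up record, the script above emits one question per line in the evaluation set and the tab-joined positive titles in the gold file:

record = {
    "question": "who wrote hamlet",
    "positive_ctxs": [{"title": "Hamlet"}, {"title": "William Shakespeare"}],
}
eval_line = record["question"] + "\n"                                      # "who wrote hamlet\n"
gold_line = "\t".join(c["title"] for c in record["positive_ctxs"]) + "\n"  # "Hamlet\tWilliam Shakespeare\n"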
"""simple docstring"""
import math
import unittest
def _lowerCAmelCase ( UpperCAmelCase__ : int ) ->bool:
assert isinstance(UpperCAmelCase__, UpperCAmelCase__ ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5, int(math.sqrt(UpperCAmelCase__ ) + 1 ), 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
def _UpperCamelCase ( self : Optional[Any] ):
'''simple docstring'''
with self.assertRaises(snake_case ):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , """Zero doesn't have any positive factors, primes must have exactly two.""" , )
self.assertFalse(
is_prime(1 ) , """One only has 1 positive factor, primes must have exactly two.""" , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 296 |
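A cross-check of the 6k +/- 1 trial division above against a straightforward sieve of Eratosthenes over a small range:

def sieve(limit: int) -> set[int]:
    flags = [True] * (limit + 1)
    flags[0] = flags[1] = False
    for i in range(2, int(limit**0.5) + 1):
        if flags[i]:
            for j in range(i * i, limit + 1, i):
                flags[j] = False
    return {i for i, f in enumerate(flags) if f}


primes = sieve(1000)
assert all(is_prime(n) == (n in primes) for n in range(1000))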
"""simple docstring"""
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class __SCREAMING_SNAKE_CASE :
def __init__( self : Union[str, Any] , snake_case : str , snake_case : List[Any]=13 , snake_case : Union[str, Any]=7 , snake_case : Optional[Any]=True , snake_case : str=True , snake_case : Dict=False , snake_case : Union[str, Any]=True , snake_case : Optional[Any]=99 , snake_case : str=32 , snake_case : Tuple=5 , snake_case : List[str]=4 , snake_case : Optional[int]=37 , snake_case : str="gelu" , snake_case : Tuple=0.1 , snake_case : Optional[int]=0.1 , snake_case : int=512 , snake_case : List[str]=16 , snake_case : str=2 , snake_case : Optional[int]=0.02 , snake_case : str=3 , snake_case : Dict=4 , snake_case : Optional[Any]=None , ):
'''simple docstring'''
A__ : int = parent
A__ : Union[str, Any] = batch_size
A__ : Optional[int] = seq_length
A__ : List[Any] = is_training
A__ : List[str] = use_input_mask
A__ : Optional[Any] = use_token_type_ids
A__ : List[Any] = use_labels
A__ : Union[str, Any] = vocab_size
A__ : List[Any] = hidden_size
A__ : Any = num_hidden_layers
A__ : Any = num_attention_heads
A__ : Optional[int] = intermediate_size
A__ : Any = hidden_act
A__ : Tuple = hidden_dropout_prob
A__ : Dict = attention_probs_dropout_prob
A__ : Optional[int] = max_position_embeddings
A__ : Tuple = type_vocab_size
A__ : Union[str, Any] = type_sequence_label_size
A__ : List[str] = initializer_range
A__ : Any = num_labels
A__ : Any = num_choices
A__ : int = scope
def _UpperCamelCase ( self : int ):
'''simple docstring'''
A__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : Tuple = None
if self.use_input_mask:
A__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
A__ : Union[str, Any] = None
if self.use_token_type_ids:
A__ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A__ : int = None
A__ : int = None
A__ : List[str] = None
if self.use_labels:
A__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ : Dict = ids_tensor([self.batch_size] , self.num_choices )
A__ : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case , initializer_range=self.initializer_range , )
def _UpperCamelCase ( self : Optional[int] , snake_case : Dict , snake_case : Any , snake_case : Dict , snake_case : Any , snake_case : Union[str, Any] , snake_case : Tuple , snake_case : Optional[Any] ):
'''simple docstring'''
A__ : Optional[Any] = BioGptModel(config=snake_case )
model.to(snake_case )
model.eval()
A__ : List[Any] = model(snake_case , attention_mask=snake_case )
A__ : Dict = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self : Optional[int] , snake_case : Dict , snake_case : Optional[int] , snake_case : List[str] , snake_case : str , snake_case : Optional[Any] , snake_case : List[str] , snake_case : List[Any] , snake_case : Tuple , snake_case : Optional[Any] , ):
'''simple docstring'''
A__ : List[str] = BioGptForCausalLM(config=snake_case )
model.to(snake_case )
model.eval()
A__ : Union[str, Any] = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self : Any , snake_case : str , snake_case : Tuple , snake_case : int , snake_case : Optional[Any] , snake_case : Any , *snake_case : Dict ):
'''simple docstring'''
A__ : Union[str, Any] = BioGptModel(config=snake_case )
model.to(snake_case )
model.eval()
# create attention mask
A__ : List[Any] = torch.ones(input_ids.shape , dtype=torch.long , device=snake_case )
A__ : Any = self.seq_length // 2
A__ : str = 0
# first forward pass
A__ , A__ : List[Any] = model(snake_case , attention_mask=snake_case ).to_tuple()
# create hypothetical next token and extent to next_input_ids
A__ : int = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
A__ : List[str] = ids_tensor((1,) , snake_case ).item() + 1
A__ : Optional[int] = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
A__ : int = random_other_next_tokens
# append to next input_ids and attn_mask
A__ : str = torch.cat([input_ids, next_tokens] , dim=-1 )
A__ : List[Any] = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=snake_case )] , dim=1 , )
# get two different outputs
A__ : Union[str, Any] = model(snake_case , attention_mask=snake_case )["""last_hidden_state"""]
A__ : Optional[int] = model(snake_case , past_key_values=snake_case , attention_mask=snake_case )["""last_hidden_state"""]
# select random slice
A__ : List[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A__ : List[str] = output_from_no_past[:, -1, random_slice_idx].detach()
A__ : Any = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case , snake_case , atol=1e-3 ) )
def _UpperCamelCase ( self : int , snake_case : Optional[Any] , snake_case : Optional[Any] , snake_case : str , snake_case : int , snake_case : Optional[Any] , *snake_case : str ):
'''simple docstring'''
A__ : Dict = BioGptModel(config=snake_case ).to(snake_case ).eval()
A__ : Tuple = torch.ones(input_ids.shape , dtype=torch.long , device=snake_case )
# first forward pass
A__ : Dict = model(snake_case , attention_mask=snake_case , use_cache=snake_case )
A__ , A__ : List[Any] = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
A__ : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
A__ : int = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
A__ : str = torch.cat([input_ids, next_tokens] , dim=-1 )
A__ : Optional[int] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
A__ : Any = model(snake_case , attention_mask=snake_case )["""last_hidden_state"""]
A__ : Union[str, Any] = model(snake_case , attention_mask=snake_case , past_key_values=snake_case )[
"""last_hidden_state"""
]
# select random slice
A__ : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A__ : Any = output_from_no_past[:, -3:, random_slice_idx].detach()
A__ : List[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case , snake_case , atol=1e-3 ) )
def _UpperCamelCase ( self : Optional[Any] , snake_case : Tuple , snake_case : Union[str, Any] , snake_case : Optional[Any] , snake_case : Any , snake_case : Tuple , *snake_case : Union[str, Any] , snake_case : Union[str, Any]=False ):
'''simple docstring'''
A__ : Tuple = BioGptForCausalLM(snake_case )
model.to(snake_case )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
A__ : Optional[Any] = model(snake_case , labels=snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def _UpperCamelCase ( self : int , snake_case : Optional[Any] , *snake_case : Optional[int] ):
'''simple docstring'''
A__ : int = BioGptModel(snake_case )
A__ : Union[str, Any] = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def _UpperCamelCase ( self : Any , snake_case : Dict , snake_case : Tuple , snake_case : int , snake_case : Union[str, Any] , snake_case : Dict , *snake_case : Any ):
'''simple docstring'''
A__ : Union[str, Any] = self.num_labels
A__ : int = BioGptForTokenClassification(snake_case )
model.to(snake_case )
model.eval()
A__ : str = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCamelCase ( self : int ):
'''simple docstring'''
A__ : List[str] = self.prepare_config_and_inputs()
(
(
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) ,
) : str = config_and_inputs
A__ : Union[str, Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase ):
snake_case_ = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
snake_case_ = (BioGptForCausalLM,) if is_torch_available() else ()
snake_case_ = (
{
'feature-extraction': BioGptModel,
'text-classification': BioGptForSequenceClassification,
'text-generation': BioGptForCausalLM,
'token-classification': BioGptForTokenClassification,
'zero-shot': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case_ = False
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
A__ : List[str] = BioGptModelTester(self )
A__ : List[Any] = ConfigTester(self , config_class=snake_case , hidden_size=37 )
def _UpperCamelCase ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
A__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def _UpperCamelCase ( self : Any ):
'''simple docstring'''
A__ : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A__ : str = type
self.model_tester.create_and_check_model(*snake_case )
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
A__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*snake_case )
def _UpperCamelCase ( self : Optional[Any] ):
'''simple docstring'''
A__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*snake_case , gradient_checkpointing=snake_case )
def _UpperCamelCase ( self : Optional[int] ):
'''simple docstring'''
A__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*snake_case )
def _UpperCamelCase ( self : int ):
'''simple docstring'''
A__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*snake_case )
def _UpperCamelCase ( self : List[str] ):
'''simple docstring'''
A__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*snake_case )
@slow
def _UpperCamelCase ( self : str ):
'''simple docstring'''
A__ : Tuple = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
model.to(snake_case )
A__ : Optional[int] = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
A__ : Any = """left"""
# Define PAD Token = EOS Token = 50256
A__ : Optional[int] = tokenizer.eos_token
A__ : Dict = model.config.eos_token_id
# use different length sentences to test batching
A__ : Union[str, Any] = [
"""Hello, my dog is a little""",
"""Today, I""",
]
A__ : List[str] = tokenizer(snake_case , return_tensors="""pt""" , padding=snake_case )
A__ : str = inputs["""input_ids"""].to(snake_case )
A__ : Dict = model.generate(
input_ids=snake_case , attention_mask=inputs["""attention_mask"""].to(snake_case ) , )
A__ : Optional[int] = tokenizer(sentences[0] , return_tensors="""pt""" ).input_ids.to(snake_case )
A__ : Any = model.generate(input_ids=snake_case )
A__ : List[str] = inputs_non_padded.shape[-1] - inputs["""attention_mask"""][-1].long().sum().cpu().item()
A__ : str = tokenizer(sentences[1] , return_tensors="""pt""" ).input_ids.to(snake_case )
A__ : Dict = model.generate(input_ids=snake_case , max_length=model.config.max_length - num_paddings )
A__ : Optional[Any] = tokenizer.batch_decode(snake_case , skip_special_tokens=snake_case )
A__ : List[Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=snake_case )
A__ : str = tokenizer.decode(output_padded[0] , skip_special_tokens=snake_case )
A__ : Optional[int] = [
"""Hello, my dog is a little bit bigger than a little bit.""",
"""Today, I have a good idea of how to use the information""",
]
self.assertListEqual(snake_case , snake_case )
self.assertListEqual(snake_case , [non_padded_sentence, padded_sentence] )
@slow
def _UpperCamelCase ( self : Optional[Any] ):
'''simple docstring'''
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : Optional[Any] = BioGptModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def _UpperCamelCase ( self : str ):
'''simple docstring'''
A__ , A__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
A__ : Optional[int] = 3
A__ : List[Any] = input_dict["""input_ids"""]
A__ : Dict = input_ids.ne(1 ).to(snake_case )
A__ : Optional[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
A__ : Union[str, Any] = BioGptForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
A__ : int = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _UpperCamelCase ( self : int ):
'''simple docstring'''
A__ , A__ : str = self.model_tester.prepare_config_and_inputs_for_common()
A__ : Any = 3
A__ : List[Any] = """multi_label_classification"""
A__ : Dict = input_dict["""input_ids"""]
A__ : Tuple = input_ids.ne(1 ).to(snake_case )
A__ : Any = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
A__ : Tuple = BioGptForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
A__ : List[str] = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def _UpperCamelCase ( self : List[str] ):
'''simple docstring'''
A__ : Optional[Any] = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
A__ : str = torch.tensor([[2, 4805, 9, 656, 21]] )
A__ : Dict = model(snake_case )[0]
A__ : Tuple = 4_2384
A__ : str = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , snake_case )
A__ : str = torch.tensor(
[[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case , atol=1e-4 ) )
@slow
def _UpperCamelCase ( self : Dict ):
'''simple docstring'''
A__ : Tuple = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
A__ : Any = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
model.to(snake_case )
torch.manual_seed(0 )
A__ : Tuple = tokenizer("""COVID-19 is""" , return_tensors="""pt""" ).to(snake_case )
A__ : Optional[int] = model.generate(
**snake_case , min_length=100 , max_length=1024 , num_beams=5 , early_stopping=snake_case , )
A__ : Optional[int] = tokenizer.decode(output_ids[0] , skip_special_tokens=snake_case )
A__ : List[str] = (
"""COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"""
""" causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"""
""" territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"""
""" and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"""
""" more than 800,000 deaths."""
)
self.assertEqual(snake_case , snake_case )
| 296 | 1 |
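A minimal generation sketch with the same checkpoint the slow tests above use. Running it downloads the full BioGPT weights on first use, and the generation settings here are illustrative rather than tuned.

from transformers import BioGptForCausalLM, BioGptTokenizer

tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")

inputs = tokenizer("COVID-19 is", return_tensors="pt")
output_ids = model.generate(**inputs, max_length=40, num_beams=5, early_stopping=True)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))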
import argparse
from copy import deepcopy

import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load

from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    DataCollatorWithPadding,
    Trainer,
    TrainerCallback,
    TrainingArguments,
    set_seed,
)


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()


metric = load("accuracy")


def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)


class CustomCallback(TrainerCallback):
    def __init__(self, trainer) -> None:
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy


def main():
    args = get_args()
    set_seed(args.seed)

    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        }
    )

    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))

    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize,
        batched=True,
        remove_columns=train_test_validation["train"].column_names,
    )

    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    training_args = TrainingArguments(
        output_dir=args.output_dir,
        learning_rate=args.learning_rate,
        lr_scheduler_type=args.lr_scheduler_type,
        evaluation_strategy="epoch",
        save_strategy="epoch",
        logging_strategy="epoch",
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        num_train_epochs=args.num_epochs,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        weight_decay=0.01,
        metric_for_best_model="accuracy",
        run_name="complexity-java",
        report_to="wandb",
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["valid"],
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()


if __name__ == "__main__":
    main()
| 32 |
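The ClassLabel.str2int mapping used inside tokenize() above, on illustrative label names (the real codeparrot/codecomplex classes differ):

from datasets import ClassLabel

labels = ClassLabel(num_classes=3, names=["constant", "linear", "quadratic"])
assert labels.str2int("linear") == 1
assert labels.int2str(2) == "quadratic"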
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}


class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layer we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
| 32 | 1 |
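A quick check of the sparse-step bookkeeping in __init__ above: with 12 encoder layers and 3 sparse layers, every 4th layer is sparse.

from transformers import SwitchTransformersConfig

config = SwitchTransformersConfig(num_layers=12, num_sparse_encoder_layers=3)
assert config.encoder_sparse_step == 4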
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block


@dataclass
class UNet1DOutput(BaseOutput):
    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        sample_size: int = 65536,
        sample_rate: Optional[int] = None,
        in_channels: int = 2,
        out_channels: int = 2,
        extra_in_channels: int = 0,
        time_embedding_type: str = "fourier",
        flip_sin_to_cos: bool = True,
        use_timestep_embedding: bool = False,
        freq_shift: float = 0.0,
        down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type: str = "UNetMidBlock1D",
        out_block_type: str = None,
        block_out_channels: Tuple[int] = (32, 32, 64),
        act_fn: str = None,
        norm_num_groups: int = 8,
        layers_per_block: int = 1,
        downsample_each_block: bool = False,
    ):
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim,
                time_embed_dim=time_embed_dim,
                act_fn=act_fn,
                out_dim=block_out_channels[0],
            )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]

            if i == 0:
                input_channel += extra_in_channels

            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_downsample=not is_final_block or downsample_each_block,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type,
            in_channels=block_out_channels[-1],
            mid_channels=block_out_channels[-1],
            out_channels=block_out_channels[-1],
            embed_dim=block_out_channels[0],
            num_layers=layers_per_block,
            add_downsample=downsample_each_block,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]

        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )

            is_final_block = i == len(up_block_types) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type,
            num_groups_out=num_groups_out,
            embed_dim=block_out_channels[0],
            out_channels=out_channels,
            act_fn=act_fn,
            fc_dim=block_out_channels[-1] // 4,
        )

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        return_dict: bool = True,
    ) -> Union[UNet1DOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
| 356 |
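A tiny forward-pass sketch for the 1-D UNet above. extra_in_channels=16 mirrors the dance-diffusion configuration, where the 16 Fourier time-embedding channels are concatenated onto the input; the other values are illustrative and the exact shape constraints depend on the block implementations.

import torch
from diffusers import UNet1DModel

model = UNet1DModel(
    sample_size=256,
    in_channels=2,
    out_channels=2,
    extra_in_channels=16,  # matches the 2 * 8 Fourier channels concatenated by the default time embedding
    block_out_channels=(32, 32, 64),
)
sample = torch.randn(1, 2, 256)  # (batch, channels, length)
out = model(sample, timestep=10).sample
print(out.shape)  # expected to match the input shape, torch.Size([1, 2, 256])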
from __future__ import annotations


def mean(nums: list[float]) -> float:
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 37 | 0 |
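Example usage, cross-checked against the standard library:

from statistics import mean as stat_mean

assert mean([3, 6, 9]) == 6.0 == stat_mean([3, 6, 9])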
"""simple docstring"""
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
lowercase__ = logging.get_logger(__name__)
class lowerCAmelCase__ :
'''simple docstring'''
lowerCamelCase__ = 42
lowerCamelCase__ = None
@staticmethod
def A_ ( ):
raise NotImplementedError
def A_ ( self , lowercase , lowercase , lowercase , **lowercase ):
raise NotImplementedError
def A_ ( self , lowercase ):
raise NotImplementedError
def A_ ( self ):
if not self.is_available():
raise RuntimeError(
F'''You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.''' )
@classmethod
def A_ ( cls ):
return F'''`pip install {cls.pip_package or cls.name}`'''
class lowerCAmelCase__ ( _lowerCamelCase ):
'''simple docstring'''
lowerCamelCase__ = '''optuna'''
@staticmethod
def A_ ( ):
return is_optuna_available()
def A_ ( self , lowercase , lowercase , lowercase , **lowercase ):
return run_hp_search_optuna(_A , _A , _A , **_A )
def A_ ( self , lowercase ):
return default_hp_space_optuna(_A )
class lowerCAmelCase__ ( _lowerCamelCase ):
'''simple docstring'''
lowerCamelCase__ = '''ray'''
lowerCamelCase__ = '''\'ray[tune]\''''
@staticmethod
def A_ ( ):
return is_ray_available()
def A_ ( self , lowercase , lowercase , lowercase , **lowercase ):
return run_hp_search_ray(_A , _A , _A , **_A )
def A_ ( self , lowercase ):
return default_hp_space_ray(_A )
class lowerCAmelCase__ ( _lowerCamelCase ):
'''simple docstring'''
lowerCamelCase__ = '''sigopt'''
@staticmethod
def A_ ( ):
return is_sigopt_available()
def A_ ( self , lowercase , lowercase , lowercase , **lowercase ):
return run_hp_search_sigopt(_A , _A , _A , **_A )
def A_ ( self , lowercase ):
return default_hp_space_sigopt(_A )
class lowerCAmelCase__ ( _lowerCamelCase ):
'''simple docstring'''
lowerCamelCase__ = '''wandb'''
@staticmethod
def A_ ( ):
return is_wandb_available()
def A_ ( self , lowercase , lowercase , lowercase , **lowercase ):
return run_hp_search_wandb(_A , _A , _A , **_A )
def A_ ( self , lowercase ):
return default_hp_space_wandb(_A )
lowercase__ = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def _snake_case ( ):
_lowerCamelCase : List[Any] = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(lowercase__ ) > 0:
_lowerCamelCase : Optional[int] = available_backends[0].name
if len(lowercase__ ) > 1:
logger.info(
f'''{len(lowercase__ )} hyperparameter search backends available. Using {name} as the default.''' )
return name
raise RuntimeError(
'No hyperparameter search backend available.\n'
+ '\n'.join(
f''' - To install {backend.name} run {backend.pip_install()}'''
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) ) | 96 |
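How these backends are typically reached from user code. This is a sketch: trainer is assumed to be a configured transformers.Trainer built with a model_init, and the search space is illustrative.

def hp_space(trial):
    # optuna-style search space; the parameter range is an example
    return {"learning_rate": trial.suggest_float("learning_rate", 1e-5, 1e-3, log=True)}


best_run = trainer.hyperparameter_search(
    direction="minimize",
    backend="optuna",
    hp_space=hp_space,
    n_trials=10,
)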
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 308 | 0 |
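What the _LazyModule swap at the bottom buys: importing the package module is cheap, and the heavy submodule import only runs when a name is first accessed.

import transformers.models.mask2former as m2f  # fast: no modeling code is imported yet

config_cls = m2f.Mask2FormerConfig  # first attribute access triggers the real submodule import
config = config_cls()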
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE (a__ , unittest.TestCase ):
lowerCAmelCase = AudioLDMPipeline
lowerCAmelCase = TEXT_TO_AUDIO_PARAMS
lowerCAmelCase = TEXT_TO_AUDIO_BATCH_PARAMS
lowerCAmelCase = frozenset(
[
'''num_inference_steps''',
'''num_waveforms_per_prompt''',
'''generator''',
'''latents''',
'''output_type''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
torch.manual_seed(0)
__A : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=(32, 64) , class_embed_type='simple_projection' , projection_class_embeddings_input_dim=32 , class_embeddings_concat=_UpperCAmelCase , )
__A : Optional[int] = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=_UpperCAmelCase , set_alpha_to_one=_UpperCAmelCase , )
torch.manual_seed(0)
__A : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0)
__A : str = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , projection_dim=32 , )
__A : Any = ClapTextModelWithProjection(_UpperCAmelCase)
__A : int = RobertaTokenizer.from_pretrained('hf-internal-testing/tiny-random-roberta' , model_max_length=77)
__A : Union[str, Any] = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=1_6000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=_UpperCAmelCase , )
__A : Optional[Any] = SpeechTaHifiGan(_UpperCAmelCase)
__A : Any = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'vocoder': vocoder,
}
return components
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase=0):
'''simple docstring'''
if str(_UpperCAmelCase).startswith('mps'):
__A : Any = torch.manual_seed(_UpperCAmelCase)
else:
__A : str = torch.Generator(device=_UpperCAmelCase).manual_seed(_UpperCAmelCase)
__A : List[str] = {
'prompt': 'A hammer hitting a wooden surface',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
}
return inputs
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = 'cpu' # ensure determinism for the device-dependent torch.Generator
__A : Dict = self.get_dummy_components()
__A : Optional[int] = AudioLDMPipeline(**_UpperCAmelCase)
__A : Optional[Any] = audioldm_pipe.to(_UpperCAmelCase)
audioldm_pipe.set_progress_bar_config(disable=_UpperCAmelCase)
__A : int = self.get_dummy_inputs(_UpperCAmelCase)
__A : Tuple = audioldm_pipe(**_UpperCAmelCase)
__A : Optional[Any] = output.audios[0]
assert audio.ndim == 1
assert len(_UpperCAmelCase) == 256
__A : Any = audio[:10]
__A : Union[str, Any] = np.array(
[-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033])
assert np.abs(audio_slice - expected_slice).max() < 1e-2
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = self.get_dummy_components()
__A : List[Any] = AudioLDMPipeline(**_UpperCAmelCase)
__A : List[Any] = audioldm_pipe.to(_UpperCAmelCase)
__A : List[Any] = audioldm_pipe.to(_UpperCAmelCase)
audioldm_pipe.set_progress_bar_config(disable=_UpperCAmelCase)
__A : str = self.get_dummy_inputs(_UpperCAmelCase)
__A : List[Any] = 3 * [inputs['prompt']]
# forward
__A : Tuple = audioldm_pipe(**_UpperCAmelCase)
__A : str = output.audios[0]
__A : Union[str, Any] = self.get_dummy_inputs(_UpperCAmelCase)
__A : Union[str, Any] = 3 * [inputs.pop('prompt')]
__A : Any = audioldm_pipe.tokenizer(
_UpperCAmelCase , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=_UpperCAmelCase , return_tensors='pt' , )
__A : Tuple = text_inputs['input_ids'].to(_UpperCAmelCase)
__A : Optional[int] = audioldm_pipe.text_encoder(
_UpperCAmelCase , )
__A : Optional[Any] = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
__A : Optional[Any] = F.normalize(_UpperCAmelCase , dim=-1)
__A : Optional[Any] = prompt_embeds
# forward
__A : Union[str, Any] = audioldm_pipe(**_UpperCAmelCase)
__A : Optional[int] = output.audios[0]
assert np.abs(audio_a - audio_a).max() < 1e-2
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = self.get_dummy_components()
__A : Tuple = AudioLDMPipeline(**_UpperCAmelCase)
__A : Dict = audioldm_pipe.to(_UpperCAmelCase)
__A : Optional[Any] = audioldm_pipe.to(_UpperCAmelCase)
audioldm_pipe.set_progress_bar_config(disable=_UpperCAmelCase)
__A : Tuple = self.get_dummy_inputs(_UpperCAmelCase)
__A : Union[str, Any] = 3 * ['this is a negative prompt']
__A : List[Any] = negative_prompt
__A : int = 3 * [inputs['prompt']]
# forward
__A : List[Any] = audioldm_pipe(**_UpperCAmelCase)
__A : str = output.audios[0]
__A : Any = self.get_dummy_inputs(_UpperCAmelCase)
__A : Any = 3 * [inputs.pop('prompt')]
__A : Any = []
for p in [prompt, negative_prompt]:
__A : Union[str, Any] = audioldm_pipe.tokenizer(
_UpperCAmelCase , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=_UpperCAmelCase , return_tensors='pt' , )
__A : Tuple = text_inputs['input_ids'].to(_UpperCAmelCase)
__A : Any = audioldm_pipe.text_encoder(
_UpperCAmelCase , )
__A : Tuple = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
__A : str = F.normalize(_UpperCAmelCase , dim=-1)
embeds.append(_UpperCAmelCase)
__A : List[str] = embeds
# forward
__A : str = audioldm_pipe(**_UpperCAmelCase)
__A : Optional[int] = output.audios[0]
assert np.abs(audio_a - audio_a).max() < 1e-2
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = 'cpu' # ensure determinism for the device-dependent torch.Generator
__A : Optional[Any] = self.get_dummy_components()
__A : List[Any] = PNDMScheduler(skip_prk_steps=_UpperCAmelCase)
__A : int = AudioLDMPipeline(**_UpperCAmelCase)
__A : Optional[Any] = audioldm_pipe.to(_UpperCAmelCase)
audioldm_pipe.set_progress_bar_config(disable=_UpperCAmelCase)
__A : int = self.get_dummy_inputs(_UpperCAmelCase)
__A : List[str] = 'egg cracking'
__A : Union[str, Any] = audioldm_pipe(**_UpperCAmelCase , negative_prompt=_UpperCAmelCase)
__A : List[Any] = output.audios[0]
assert audio.ndim == 1
assert len(_UpperCAmelCase) == 256
__A : Dict = audio[:10]
__A : Optional[int] = np.array(
[-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032])
assert np.abs(audio_slice - expected_slice).max() < 1e-2
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
__A : Dict = self.get_dummy_components()
__A : List[Any] = PNDMScheduler(skip_prk_steps=_UpperCAmelCase)
__A : Optional[Any] = AudioLDMPipeline(**_UpperCAmelCase)
__A : Any = audioldm_pipe.to(_UpperCAmelCase)
audioldm_pipe.set_progress_bar_config(disable=_UpperCAmelCase)
__A : int = 'A hammer hitting a wooden surface'
# test num_waveforms_per_prompt=1 (default)
__A : Dict = audioldm_pipe(_UpperCAmelCase , num_inference_steps=2).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
__A : str = 2
__A : str = audioldm_pipe([prompt] * batch_size , num_inference_steps=2).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
__A : Union[str, Any] = 2
__A : Union[str, Any] = audioldm_pipe(_UpperCAmelCase , num_inference_steps=2 , num_waveforms_per_prompt=_UpperCAmelCase).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
__A : int = 2
__A : Any = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=_UpperCAmelCase).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
__A : Optional[int] = self.get_dummy_components()
__A : int = AudioLDMPipeline(**_UpperCAmelCase)
__A : List[str] = audioldm_pipe.to(_UpperCAmelCase)
audioldm_pipe.set_progress_bar_config(disable=_UpperCAmelCase)
__A : List[str] = audioldm_pipe.vocoder.config.sampling_rate
__A : Optional[Any] = self.get_dummy_inputs(_UpperCAmelCase)
__A : Optional[int] = audioldm_pipe(audio_length_in_s=0.016 , **_UpperCAmelCase)
__A : List[str] = output.audios[0]
assert audio.ndim == 1
assert len(_UpperCAmelCase) / vocoder_sampling_rate == 0.016
__A : Optional[Any] = audioldm_pipe(audio_length_in_s=0.032 , **_UpperCAmelCase)
__A : List[Any] = output.audios[0]
assert audio.ndim == 1
assert len(_UpperCAmelCase) / vocoder_sampling_rate == 0.032
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = self.get_dummy_components()
__A : List[Any] = AudioLDMPipeline(**_UpperCAmelCase)
__A : Tuple = audioldm_pipe.to(_UpperCAmelCase)
audioldm_pipe.set_progress_bar_config(disable=_UpperCAmelCase)
__A : List[str] = ['hey']
__A : Dict = audioldm_pipe(_UpperCAmelCase , num_inference_steps=1)
__A : Union[str, Any] = output.audios.shape
assert audio_shape == (1, 256)
__A : Any = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
__A : Any = SpeechTaHifiGan(_UpperCAmelCase).to(_UpperCAmelCase)
__A : int = audioldm_pipe(_UpperCAmelCase , num_inference_steps=1)
__A : Any = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self._test_inference_batch_single_identical(test_mean_pixel_difference=_UpperCAmelCase)
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_UpperCAmelCase)
@slow
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase="cpu" , _UpperCAmelCase=torch.floataa , _UpperCAmelCase=0):
'''simple docstring'''
__A : List[str] = torch.Generator(device=_UpperCAmelCase).manual_seed(_UpperCAmelCase)
__A : Tuple = np.random.RandomState(_UpperCAmelCase).standard_normal((1, 8, 128, 16))
__A : Any = torch.from_numpy(_UpperCAmelCase).to(device=_UpperCAmelCase , dtype=_UpperCAmelCase)
__A : str = {
'prompt': 'A hammer hitting a wooden surface',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 2.5,
}
return inputs
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = AudioLDMPipeline.from_pretrained('cvssp/audioldm')
__A : List[str] = audioldm_pipe.to(_UpperCAmelCase)
audioldm_pipe.set_progress_bar_config(disable=_UpperCAmelCase)
__A : str = self.get_inputs(_UpperCAmelCase)
__A : str = 25
__A : Union[str, Any] = audioldm_pipe(**_UpperCAmelCase).audios[0]
assert audio.ndim == 1
assert len(_UpperCAmelCase) == 8_1920
__A : Optional[Any] = audio[7_7230:7_7240]
__A : Optional[Any] = np.array(
[-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315])
__A : int = np.abs(expected_slice - audio_slice).max()
assert max_diff < 1e-2
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = AudioLDMPipeline.from_pretrained('cvssp/audioldm')
__A : List[Any] = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
__A : str = audioldm_pipe.to(_UpperCAmelCase)
audioldm_pipe.set_progress_bar_config(disable=_UpperCAmelCase)
__A : List[str] = self.get_inputs(_UpperCAmelCase)
__A : Optional[int] = audioldm_pipe(**_UpperCAmelCase).audios[0]
assert audio.ndim == 1
assert len(_UpperCAmelCase) == 8_1920
__A : Optional[int] = audio[2_7780:2_7790]
__A : Optional[Any] = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212])
__A : Union[str, Any] = np.abs(expected_slice - audio_slice).max()
assert max_diff < 3e-2 | 364 |
'''simple docstring'''
lowercase__ : Any = {'''a''': ['''c''', '''b'''], '''b''': ['''d''', '''e'''], '''c''': [], '''d''': [], '''e''': []}
lowercase__ : List[Any] = ['''a''', '''b''', '''c''', '''d''', '''e''']
def _lowerCAmelCase ( __snake_case : str , __snake_case : Tuple , __snake_case : int ) -> Tuple:
__A : List[str] = start
# add current to visited
visited.append(__snake_case )
__A : Optional[int] = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
__A : int = topological_sort(__snake_case , __snake_case , __snake_case )
# if all neighbors visited add current to sort
sort.append(__snake_case )
# if all vertices haven't been visited select a new one to visit
if len(__snake_case ) != len(__snake_case ):
for vertice in vertices:
if vertice not in visited:
__A : Dict = topological_sort(__snake_case , __snake_case , __snake_case )
# return sort
return sort
if __name__ == "__main__":
lowercase__ : Tuple = topological_sort('''a''', [], [])
print(sort) | 190 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
_A : int = logging.get_logger(__name__)
class _lowercase ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self : int , *SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : Tuple ) -> None:
warnings.warn(
"""The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use YolosImageProcessor instead.""" , SCREAMING_SNAKE_CASE__ , )
super().__init__(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
| 229 | '''simple docstring'''
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class _lowercase :
'''simple docstring'''
_SCREAMING_SNAKE_CASE : float
_SCREAMING_SNAKE_CASE : TreeNode | None = None
_SCREAMING_SNAKE_CASE : TreeNode | None = None
def UpperCamelCase_ ( snake_case_ : TreeNode | None ) -> bool:
'''simple docstring'''
def is_valid_tree(snake_case_ : TreeNode | None ) -> bool:
if node is None:
return True
if not isinstance(snake_case_ , snake_case_ ):
return False
try:
float(node.data )
except (TypeError, ValueError):
return False
return is_valid_tree(node.left ) and is_valid_tree(node.right )
if not is_valid_tree(snake_case_ ):
raise ValueError(
"""Each node should be type of TreeNode and data should be float.""" )
def is_binary_search_tree_recursive_check(
snake_case_ : TreeNode | None , snake_case_ : float , snake_case_ : float ) -> bool:
if node is None:
return True
return (
left_bound < node.data < right_bound
and is_binary_search_tree_recursive_check(node.left , snake_case_ , node.data )
and is_binary_search_tree_recursive_check(
node.right , node.data , snake_case_ )
)
return is_binary_search_tree_recursive_check(snake_case_ , -float("""inf""" ) , float("""inf""" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 229 | 1 |
"""simple docstring"""
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ) -> Any:
# Format the message.
if name is None:
__lowerCAmelCase: str = None
else:
__lowerCAmelCase: Optional[Any] = "." * max(0 , spaces - 2 ) + "# {:" + str(5_0 - spaces ) + "s}"
__lowerCAmelCase: Dict = fmt.format(__SCREAMING_SNAKE_CASE )
# Print and recurse (if needed).
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
if msg is not None:
print(__SCREAMING_SNAKE_CASE )
for k in val.keys():
recursive_print(__SCREAMING_SNAKE_CASE , val[k] , spaces + 2 )
elif isinstance(__SCREAMING_SNAKE_CASE , torch.Tensor ):
print(__SCREAMING_SNAKE_CASE , ":" , val.size() )
else:
print(__SCREAMING_SNAKE_CASE , ":" , __SCREAMING_SNAKE_CASE )
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Tuple:
# Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
# for compatibility with later versions of NVIDIA Megatron-LM.
# The inverse operation is performed inside Megatron-LM to read checkpoints:
# https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
# If param is the weight tensor of the self-attention block, the returned tensor
# will have to be transposed one more time to be read by HuggingFace GPT2.
__lowerCAmelCase: Union[str, Any] = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
__lowerCAmelCase: Union[str, Any] = (num_heads, hidden_size, num_splits) + input_shape[1:]
__lowerCAmelCase: Optional[int] = param.view(*__SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Optional[int] = param.transpose(0 , 2 )
__lowerCAmelCase: Tuple = param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
__lowerCAmelCase: List[Any] = (num_heads, num_splits, hidden_size) + input_shape[1:]
__lowerCAmelCase: int = param.view(*__SCREAMING_SNAKE_CASE )
__lowerCAmelCase: str = param.transpose(0 , 1 ).contiguous()
__lowerCAmelCase: Optional[Any] = param.view(*__SCREAMING_SNAKE_CASE )
return param
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[Any]:
# The converted output model.
__lowerCAmelCase: Union[str, Any] = {}
# old versions did not store training args
__lowerCAmelCase: str = input_state_dict.get("args" , __SCREAMING_SNAKE_CASE )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
__lowerCAmelCase: Union[str, Any] = ds_args.padded_vocab_size
__lowerCAmelCase: Dict = ds_args.max_position_embeddings
__lowerCAmelCase: Dict = ds_args.hidden_size
__lowerCAmelCase: Tuple = ds_args.num_layers
__lowerCAmelCase: Any = ds_args.num_attention_heads
__lowerCAmelCase: List[str] = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
__lowerCAmelCase: List[str] = config.n_head
# The hidden_size per head.
__lowerCAmelCase: Any = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
__lowerCAmelCase: List[Any] = input_state_dict["checkpoint_version"]
else:
__lowerCAmelCase: Union[str, Any] = 0.0
# The model.
__lowerCAmelCase: Union[str, Any] = input_state_dict["model"]
# The language model.
__lowerCAmelCase: Any = model["language_model"]
# The embeddings.
__lowerCAmelCase: Tuple = lm["embedding"]
# The word embeddings.
__lowerCAmelCase: Optional[int] = embeddings["word_embeddings"]["weight"]
# Truncate the embedding table to vocab_size rows.
__lowerCAmelCase: List[Any] = word_embeddings[: config.vocab_size, :]
__lowerCAmelCase: Any = word_embeddings
# The position embeddings.
__lowerCAmelCase: str = embeddings["position_embeddings"]["weight"]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
__lowerCAmelCase: Tuple = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
F"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match" )
# Store the position embeddings.
__lowerCAmelCase: List[str] = pos_embeddings
# The transformer.
__lowerCAmelCase: str = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]
# The regex to extract layer names.
__lowerCAmelCase: Any = re.compile(R"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)" )
# The simple map of names for "automated" rules.
__lowerCAmelCase: Tuple = {
"attention.dense": ".attn.c_proj.",
"self_attention.dense": ".attn.c_proj.",
"mlp.dense_h_to_4h": ".mlp.c_fc.",
"mlp.dense_4h_to_h": ".mlp.c_proj.",
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
__lowerCAmelCase: Union[str, Any] = layer_re.match(__SCREAMING_SNAKE_CASE )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
__lowerCAmelCase: Optional[int] = int(m.group(1 ) )
# The name of the operation.
__lowerCAmelCase: int = m.group(2 )
# Is it a weight or a bias?
__lowerCAmelCase: int = m.group(3 )
# The name of the layer.
__lowerCAmelCase: List[str] = F"transformer.h.{layer_idx}"
# For layernorm(s), simply store the layer norm.
if op_name.endswith("layernorm" ):
__lowerCAmelCase: Union[str, Any] = "ln_1" if op_name.startswith("input" ) else "ln_2"
__lowerCAmelCase: int = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
__lowerCAmelCase: List[Any] = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__lowerCAmelCase: List[Any] = causal_mask
# Insert a "dummy" tensor for masked_bias.
__lowerCAmelCase: Optional[Any] = torch.tensor(-1E4 , dtype=torch.floataa )
__lowerCAmelCase: Any = masked_bias
__lowerCAmelCase: Optional[Any] = fix_query_key_value_ordering(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , 3 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
__lowerCAmelCase: str = out_val.transpose(0 , 1 ).contiguous()
# Store.
__lowerCAmelCase: Dict = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
__lowerCAmelCase: Union[str, Any] = fix_query_key_value_ordering(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , 3 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Store. No change of shape.
__lowerCAmelCase: Optional[int] = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
__lowerCAmelCase: List[str] = megatron_to_transformers[op_name]
__lowerCAmelCase: List[Any] = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
__lowerCAmelCase: str = megatron_to_transformers[op_name]
__lowerCAmelCase: List[str] = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
__lowerCAmelCase: Tuple = transformer["final_layernorm.weight"]
__lowerCAmelCase: Union[str, Any] = transformer["final_layernorm.bias"]
# For LM head, transformers' wants the matrix to weight embeddings.
__lowerCAmelCase: List[str] = word_embeddings
# It should be done!
return output_state_dict
def a__ ( ) -> Tuple:
# Create the argument parser.
__lowerCAmelCase: List[str] = argparse.ArgumentParser()
parser.add_argument("--print-checkpoint-structure" , action="store_true" )
parser.add_argument(
"path_to_checkpoint" , type=__SCREAMING_SNAKE_CASE , help="Path to the checkpoint file (.zip archive or direct .pt file)" , )
parser.add_argument(
"--config_file" , default="" , type=__SCREAMING_SNAKE_CASE , help="An optional config json file describing the pre-trained model." , )
__lowerCAmelCase: Tuple = parser.parse_args()
# Extract the basename.
__lowerCAmelCase: Optional[int] = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(F"Extracting PyTorch state dictionary from {args.path_to_checkpoint}" )
if args.path_to_checkpoint.endswith(".zip" ):
with zipfile.ZipFile(args.path_to_checkpoint , "r" ) as checkpoint:
with checkpoint.open("release/mp_rank_00/model_optim_rng.pt" ) as pytorch_dict:
__lowerCAmelCase: List[str] = torch.load(__SCREAMING_SNAKE_CASE , map_location="cpu" )
else:
__lowerCAmelCase: Dict = torch.load(args.path_to_checkpoint , map_location="cpu" )
__lowerCAmelCase: Any = input_state_dict.get("args" , __SCREAMING_SNAKE_CASE )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
__lowerCAmelCase: Union[str, Any] = "gelu_fast"
elif ds_args.openai_gelu:
__lowerCAmelCase: int = "gelu_new"
else:
__lowerCAmelCase: Dict = "gelu"
else:
# in the very early days this used to be "gelu_new"
__lowerCAmelCase: Union[str, Any] = "gelu_new"
# Spell out all parameters in case the defaults change.
__lowerCAmelCase: List[Any] = GPTaConfig(
vocab_size=5_0_2_5_7 , n_positions=1_0_2_4 , n_embd=1_0_2_4 , n_layer=2_4 , n_head=1_6 , n_inner=4_0_9_6 , activation_function=__SCREAMING_SNAKE_CASE , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , summary_type="cls_index" , summary_use_proj=__SCREAMING_SNAKE_CASE , summary_activation=__SCREAMING_SNAKE_CASE , summary_proj_to_labels=__SCREAMING_SNAKE_CASE , summary_first_dropout=0.1 , scale_attn_weights=__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE , bos_token_id=5_0_2_5_6 , eos_token_id=5_0_2_5_6 , )
else:
__lowerCAmelCase: Union[str, Any] = GPTaConfig.from_json_file(args.config_file )
__lowerCAmelCase: str = ["GPT2LMHeadModel"]
# Convert.
print("Converting" )
__lowerCAmelCase: str = convert_megatron_checkpoint(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
__lowerCAmelCase: Tuple = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
__lowerCAmelCase: List[Any] = "gpt2"
elif tokenizer_type == "PretrainedFromHF":
__lowerCAmelCase: Any = ds_args.tokenizer_name_or_path
else:
raise ValueError(F"Unrecognized tokenizer_type {tokenizer_type}" )
else:
__lowerCAmelCase: Dict = "gpt2"
__lowerCAmelCase: List[Any] = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Union[str, Any] = type(__SCREAMING_SNAKE_CASE ).__name__
__lowerCAmelCase: Dict = tokenizer_class
# Store the config to file.
print("Saving config" )
config.save_pretrained(__SCREAMING_SNAKE_CASE )
# Save tokenizer based on args
print(F"Adding {tokenizer_class} tokenizer files" )
tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE )
# Store the state_dict to file.
__lowerCAmelCase: Any = os.path.join(__SCREAMING_SNAKE_CASE , "pytorch_model.bin" )
print(F"Saving checkpoint to \"{output_checkpoint_file}\"" )
torch.save(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 108 |
"""simple docstring"""
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class snake_case ( pl.LightningModule ):
def __init__( self : str , UpperCamelCase__ : List[str])-> str:
'''simple docstring'''
super().__init__()
__lowerCAmelCase: Optional[int] = model
__lowerCAmelCase: Tuple = 2
__lowerCAmelCase: List[Any] = nn.Linear(self.model.config.hidden_size , self.num_labels)
def lowercase_ ( self : Optional[int])-> str:
'''simple docstring'''
pass
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[int]:
# load longformer model from model identifier
__lowerCAmelCase: List[str] = LongformerModel.from_pretrained(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase: int = LightningModel(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Tuple = torch.load(__SCREAMING_SNAKE_CASE , map_location=torch.device("cpu" ) )
lightning_model.load_state_dict(ckpt["state_dict"] )
# init longformer question answering model
__lowerCAmelCase: Optional[Any] = LongformerForQuestionAnswering.from_pretrained(__SCREAMING_SNAKE_CASE )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(__SCREAMING_SNAKE_CASE )
print(F"Conversion successful. Model saved under {pytorch_dump_folder_path}" )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--longformer_model",
default=None,
type=str,
required=True,
help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
)
parser.add_argument(
"--longformer_question_answering_ckpt_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch Lightning Checkpoint.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__A = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 108 | 1 |
"""simple docstring"""
lowerCamelCase_ = "Alexander Joslin"
import operator as op
from .stack import Stack
def __lowerCamelCase ( a_ : str ) -> int:
__SCREAMING_SNAKE_CASE :str = {'''*''': op.mul, '''/''': op.truediv, '''+''': op.add, '''-''': op.sub}
__SCREAMING_SNAKE_CASE :Stack[int] = Stack()
__SCREAMING_SNAKE_CASE :Stack[str] = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(a_ ) )
elif i in operators:
# RULE 2
operator_stack.push(a_ )
elif i == ")":
# RULE 4
__SCREAMING_SNAKE_CASE :Optional[Any] = operator_stack.peek()
operator_stack.pop()
__SCREAMING_SNAKE_CASE :Any = operand_stack.peek()
operand_stack.pop()
__SCREAMING_SNAKE_CASE :Optional[Any] = operand_stack.peek()
operand_stack.pop()
__SCREAMING_SNAKE_CASE :Any = operators[opr](a_ , a_ )
operand_stack.push(a_ )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
lowerCamelCase_ = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(f'{equation} = {dijkstras_two_stack_algorithm(equation)}') | 191 |
"""simple docstring"""
def __lowerCamelCase ( a_ : list ) -> list:
if len(a_ ) < 2:
return collection
def circle_sort_util(a_ : list , a_ : int , a_ : int ) -> bool:
__SCREAMING_SNAKE_CASE :int = False
if low == high:
return swapped
__SCREAMING_SNAKE_CASE :int = low
__SCREAMING_SNAKE_CASE :Tuple = high
while left < right:
if collection[left] > collection[right]:
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :str = (
collection[right],
collection[left],
)
__SCREAMING_SNAKE_CASE :Optional[int] = True
left += 1
right -= 1
if left == right and collection[left] > collection[right + 1]:
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Tuple = (
collection[right + 1],
collection[left],
)
__SCREAMING_SNAKE_CASE :Tuple = True
__SCREAMING_SNAKE_CASE :int = low + int((high - low) / 2 )
__SCREAMING_SNAKE_CASE :int = circle_sort_util(a_ , a_ , a_ )
__SCREAMING_SNAKE_CASE :Union[str, Any] = circle_sort_util(a_ , mid + 1 , a_ )
return swapped or left_swap or right_swap
__SCREAMING_SNAKE_CASE :Optional[Any] = True
while is_not_sorted is True:
__SCREAMING_SNAKE_CASE :Tuple = circle_sort_util(a_ , 0 , len(a_ ) - 1 )
return collection
if __name__ == "__main__":
lowerCamelCase_ = input("Enter numbers separated by a comma:\n").strip()
lowerCamelCase_ = [int(item) for item in user_input.split(",")]
print(circle_sort(unsorted)) | 191 | 1 |
'''simple docstring'''
class lowerCAmelCase__ :
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : str = None
lowercase_ : Union[str, Any] = None
lowercase_ : Any = graph
self._normalize_graph(lowercase__ , lowercase__ )
lowercase_ : str = len(lowercase__ )
lowercase_ : Optional[Any] = None
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if sources is int:
lowercase_ : Union[str, Any] = [sources]
if sinks is int:
lowercase_ : Dict = [sinks]
if len(lowercase__ ) == 0 or len(lowercase__ ) == 0:
return
lowercase_ : Union[str, Any] = sources[0]
lowercase_ : Dict = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(lowercase__ ) > 1 or len(lowercase__ ) > 1:
lowercase_ : Union[str, Any] = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
lowercase_ : List[str] = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
lowercase_ : Optional[int] = max_input_flow
lowercase_ : Optional[Any] = 0
lowercase_ : Union[str, Any] = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
lowercase_ : List[Any] = max_input_flow
lowercase_ : Union[str, Any] = size - 1
def _snake_case ( self ):
"""simple docstring"""
if self.maximum_flow_algorithm is None:
raise Exception('''You need to set maximum flow algorithm before.''' )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Optional[int] = algorithm(self )
class lowerCAmelCase__ :
def __init__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Dict = flow_network
lowercase_ : List[str] = flow_network.verticesCount
lowercase_ : Any = flow_network.sourceIndex
lowercase_ : str = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
lowercase_ : Any = flow_network.graph
lowercase_ : List[str] = False
def _snake_case ( self ):
"""simple docstring"""
if not self.executed:
self._algorithm()
lowercase_ : Dict = True
def _snake_case ( self ):
"""simple docstring"""
pass
class lowerCAmelCase__ ( lowerCamelCase__ ):
def __init__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
super().__init__(lowercase__ )
# use this to save your result
lowercase_ : List[str] = -1
def _snake_case ( self ):
"""simple docstring"""
if not self.executed:
raise Exception('''You should execute algorithm before using its result!''' )
return self.maximum_flow
class lowerCAmelCase__ ( lowerCamelCase__ ):
def __init__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
super().__init__(lowercase__ )
lowercase_ : str = [[0] * self.verticies_count for i in range(self.verticies_count )]
lowercase_ : Dict = [0] * self.verticies_count
lowercase_ : str = [0] * self.verticies_count
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : str = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
lowercase_ : Union[str, Any] = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
lowercase_ : List[Any] = 0
while i < len(lowercase__ ):
lowercase_ : Tuple = vertices_list[i]
lowercase_ : Dict = self.heights[vertex_index]
self.process_vertex(lowercase__ )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(lowercase__ ) )
lowercase_ : Any = 0
else:
i += 1
lowercase_ : str = sum(self.preflow[self.source_index] )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(lowercase__ , lowercase__ )
self.relabel(lowercase__ )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : List[Any] = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Union[str, Any] = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
lowercase_ : List[Any] = self.heights[to_index]
if min_height is not None:
lowercase_ : List[str] = min_height + 1
if __name__ == "__main__":
_lowercase : Optional[int] = [0]
_lowercase : List[str] = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
_lowercase : Optional[int] = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
_lowercase : Dict = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
_lowercase : Dict = flow_network.find_maximum_flow()
print(f"""maximum flow is {maximum_flow}""")
| 358 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class lowerCAmelCase__ :
def __init__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : str = num_of_nodes
lowercase_ : list[list[int]] = []
lowercase_ : dict[int, int] = {}
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
self.m_edges.append([u_node, v_node, weight] )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if self.m_component[u_node] != u_node:
for k in self.m_component:
lowercase_ : Optional[int] = self.find_component(__SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if component_size[u_node] <= component_size[v_node]:
lowercase_ : Any = v_node
component_size[v_node] += component_size[u_node]
self.set_component(__SCREAMING_SNAKE_CASE )
elif component_size[u_node] >= component_size[v_node]:
lowercase_ : int = self.find_component(__SCREAMING_SNAKE_CASE )
component_size[u_node] += component_size[v_node]
self.set_component(__SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Dict = []
lowercase_ : Optional[Any] = 0
lowercase_ : list[Any] = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
lowercase_ : Union[str, Any] = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
lowercase_ , lowercase_ , lowercase_ : List[Any] = edge
lowercase_ : Dict = self.m_component[u]
lowercase_ : Any = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
lowercase_ : Union[str, Any] = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase_ , lowercase_ , lowercase_ : str = edge
lowercase_ : Tuple = self.m_component[u]
lowercase_ : Union[str, Any] = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
print(F'''Added edge [{u} - {v}]\nAdded weight: {w}\n''' )
num_of_components -= 1
lowercase_ : str = [-1] * self.m_num_of_nodes
print(F'''The total weight of the minimal spanning tree is: {mst_weight}''' )
def snake_case_ ( ):
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 264 | 0 |
'''simple docstring'''
from math import pow
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ):
"""simple docstring"""
if current_sum == needed_sum:
# If the sum of the powers is equal to needed_sum, then we have a solution.
solutions_count += 1
return current_sum, solutions_count
lowerCAmelCase__ : Optional[int] = int(pow(UpperCamelCase , UpperCamelCase ) )
if current_sum + i_to_n <= needed_sum:
# If the sum of the powers is less than needed_sum, then continue adding powers.
current_sum += i_to_n
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = backtrack(
UpperCamelCase , UpperCamelCase , current_number + 1 , UpperCamelCase , UpperCamelCase )
current_sum -= i_to_n
if i_to_n < needed_sum:
# If the power of i is less than needed_sum, then try with the next power.
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = backtrack(
UpperCamelCase , UpperCamelCase , current_number + 1 , UpperCamelCase , UpperCamelCase )
return current_sum, solutions_count
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
raise ValueError(
"""Invalid input\n"""
"""needed_sum must be between 1 and 1000, power between 2 and 10.""" )
return backtrack(UpperCamelCase , UpperCamelCase , 1 , 0 , 0 )[1] # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 37 |
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ : Tuple = abs(UpperCamelCase )
lowerCAmelCase__ : List[Any] = 0
while n > 0:
res += n % 10
n //= 10
return res
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = abs(UpperCamelCase )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
return sum(int(UpperCamelCase ) for c in str(abs(UpperCamelCase ) ) )
def _SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(UpperCamelCase , UpperCamelCase ) -> None:
lowerCAmelCase__ : str = f"""{func.__name__}({value})"""
lowerCAmelCase__ : str = timeit(f"""__main__.{call}""" , setup="""import __main__""" )
print(f"""{call:56} = {func(UpperCamelCase )} -- {timing:.4f} seconds""" )
for value in (262144, 1125899906842624, 1267650600228229401496703205376):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(UpperCamelCase , UpperCamelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 37 | 1 |
from ..utils import DummyObject, requires_backends
class lowerCAmelCase_ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
__snake_case = ["sentencepiece"]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
__snake_case = ["sentencepiece"]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
__snake_case = ["sentencepiece"]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
__snake_case = ["sentencepiece"]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
__snake_case = ["sentencepiece"]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
__snake_case = ["sentencepiece"]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
__snake_case = ["sentencepiece"]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
__snake_case = ["sentencepiece"]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
__snake_case = ["sentencepiece"]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
__snake_case = ["sentencepiece"]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
__snake_case = ["sentencepiece"]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
__snake_case = ["sentencepiece"]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
__snake_case = ["sentencepiece"]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
__snake_case = ["sentencepiece"]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
__snake_case = ["sentencepiece"]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
__snake_case = ["sentencepiece"]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
__snake_case = ["sentencepiece"]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
__snake_case = ["sentencepiece"]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
__snake_case = ["sentencepiece"]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
__snake_case = ["sentencepiece"]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
__snake_case = ["sentencepiece"]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
__snake_case = ["sentencepiece"]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
__snake_case = ["sentencepiece"]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
__snake_case = ["sentencepiece"]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
__snake_case = ["sentencepiece"]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
__snake_case = ["sentencepiece"]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
__snake_case = ["sentencepiece"]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
__snake_case = ["sentencepiece"]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
__snake_case = ["sentencepiece"]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
__snake_case = ["sentencepiece"]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
__snake_case = ["sentencepiece"]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] ) | 267 |
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
snake_case_ = ['''a''', '''b''', '''c''']
# Defaults to last layer if both are None
snake_case_ , snake_case_ = get_aligned_output_features_output_indices(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , ['''c'''] )
self.assertEqual(_UpperCAmelCase , [2] )
# Out indices set to match out features
snake_case_ , snake_case_ = get_aligned_output_features_output_indices(['''a''', '''c'''] , _UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , ['''a''', '''c'''] )
self.assertEqual(_UpperCAmelCase , [0, 2] )
# Out features set to match out indices
snake_case_ , snake_case_ = get_aligned_output_features_output_indices(_UpperCAmelCase , [0, 2] , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , ['''a''', '''c'''] )
self.assertEqual(_UpperCAmelCase , [0, 2] )
# Out features selected from negative indices
snake_case_ , snake_case_ = get_aligned_output_features_output_indices(_UpperCAmelCase , [-3, -1] , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , ['''a''', '''c'''] )
self.assertEqual(_UpperCAmelCase , [-3, -1] )
def UpperCamelCase__ ( self ):
# Stage names must be set
with self.assertRaises(_UpperCAmelCase ):
verify_out_features_out_indices(['''a''', '''b'''] , (0, 1) , _UpperCAmelCase )
# Out features must be a list
with self.assertRaises(_UpperCAmelCase ):
verify_out_features_out_indices(('''a''', '''b''') , (0, 1) , ['''a''', '''b'''] )
# Out features must be a subset of stage names
with self.assertRaises(_UpperCAmelCase ):
verify_out_features_out_indices(['''a''', '''b'''] , (0, 1) , ['''a'''] )
# Out indices must be a list or tuple
with self.assertRaises(_UpperCAmelCase ):
verify_out_features_out_indices(_UpperCAmelCase , 0 , ['''a''', '''b'''] )
# Out indices must be a subset of stage names
with self.assertRaises(_UpperCAmelCase ):
verify_out_features_out_indices(_UpperCAmelCase , (0, 1) , ['''a'''] )
# Out features and out indices must be the same length
with self.assertRaises(_UpperCAmelCase ):
verify_out_features_out_indices(['''a''', '''b'''] , (0,) , ['''a''', '''b''', '''c'''] )
# Out features should match out indices
with self.assertRaises(_UpperCAmelCase ):
verify_out_features_out_indices(['''a''', '''b'''] , (0, 2) , ['''a''', '''b''', '''c'''] )
# Out features and out indices should be in order
with self.assertRaises(_UpperCAmelCase ):
verify_out_features_out_indices(['''b''', '''a'''] , (0, 1) , ['''a''', '''b'''] )
# Check passes with valid inputs
verify_out_features_out_indices(['''a''', '''b''', '''d'''] , (0, 1, -1) , ['''a''', '''b''', '''c''', '''d'''] )
def UpperCamelCase__ ( self ):
snake_case_ = BackboneMixin()
snake_case_ = ['''a''', '''b''', '''c''']
snake_case_ = ['''a''', '''c''']
snake_case_ = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ['''a''', '''c'''] )
self.assertEqual(backbone.out_indices , [0, 2] )
# Check out features and indices are updated correctly
snake_case_ = ['''a''', '''b''']
self.assertEqual(backbone.out_features , ['''a''', '''b'''] )
self.assertEqual(backbone.out_indices , [0, 1] )
snake_case_ = [-3, -1]
self.assertEqual(backbone.out_features , ['''a''', '''c'''] )
self.assertEqual(backbone.out_indices , [-3, -1] ) | 267 | 1 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
F'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
F'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias'))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', F'decoder.layers.{i}.sa_qcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', F'decoder.layers.{i}.sa_kcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qpos_proj.weight', F'decoder.layers.{i}.sa_qpos_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kpos_proj.weight', F'decoder.layers.{i}.sa_kpos_proj.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.weight', F'decoder.layers.{i}.sa_v_proj.weight'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', F'decoder.layers.{i}.ca_qcontent_proj.weight')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', F'decoder.layers.{i}.ca_kcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kpos_proj.weight', F'decoder.layers.{i}.ca_kpos_proj.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.weight', F'decoder.layers.{i}.ca_v_proj.weight'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', F'decoder.layers.{i}.ca_qpos_sine_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', F'decoder.layers.{i}.sa_qcontent_proj.bias')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', F'decoder.layers.{i}.sa_kcontent_proj.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.sa_qpos_proj.bias', F'decoder.layers.{i}.sa_qpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.sa_kpos_proj.bias', F'decoder.layers.{i}.sa_kpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.bias', F'decoder.layers.{i}.sa_v_proj.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', F'decoder.layers.{i}.ca_qcontent_proj.bias')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', F'decoder.layers.{i}.ca_kcontent_proj.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.ca_kpos_proj.bias', F'decoder.layers.{i}.ca_kpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.bias', F'decoder.layers.{i}.ca_v_proj.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', F'decoder.layers.{i}.ca_qpos_sine_proj.bias')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
    [
        ("input_proj.weight", "input_projection.weight"),
        ("input_proj.bias", "input_projection.bias"),
        ("query_embed.weight", "query_position_embeddings.weight"),
        ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
        ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
        ("class_embed.weight", "class_labels_classifier.weight"),
        ("class_embed.bias", "class_labels_classifier.bias"),
        ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
        ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
        ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
        ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
        ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
        ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
        ("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
        ("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
        ("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
        ("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
        ("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
        ("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
        ("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
        ("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
        ("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
        ("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
    ]
)
def _A ( lowercase , lowercase , lowercase ):
"""simple docstring"""
a =state_dict.pop(lowercase )
a =val
def _A ( lowercase ):
"""simple docstring"""
a =OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
a =key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' )
a =value
else:
a =value
return new_state_dict
def _A ( lowercase , lowercase=False ):
"""simple docstring"""
a =''''''
if is_panoptic:
a ='''conditional_detr.'''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
a =state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
a =state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
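# the fused in_proj matrix stacks query, key and value weights row-wise; with conditional DETR's hidden size of 256 this gives the 0:256 / 256:512 / -256: split points used below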
a =in_proj_weight[:2_56, :]
a =in_proj_bias[:2_56]
a =in_proj_weight[2_56:5_12, :]
a =in_proj_bias[2_56:5_12]
a =in_proj_weight[-2_56:, :]
a =in_proj_bias[-2_56:]
def _A ( ):
"""simple docstring"""
a ='''http://images.cocodataset.org/val2017/000000039769.jpg'''
a =Image.open(requests.get(lowercase , stream=lowercase ).raw )
return im
@torch.no_grad()
def _A ( lowercase , lowercase ):
"""simple docstring"""
a =ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
a ='''resnet101'''
if "dc5" in model_name:
a =True
a ='''panoptic''' in model_name
if is_panoptic:
a =2_50
else:
a =91
a ='''huggingface/label-files'''
a ='''coco-detection-id2label.json'''
a =json.load(open(hf_hub_download(lowercase , lowercase , repo_type='''dataset''' ) , '''r''' ) )
a ={int(k): v for k, v in idalabel.items()}
a =idalabel
a ={v: k for k, v in idalabel.items()}
# load image processor
a ='''coco_panoptic''' if is_panoptic else '''coco_detection'''
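# panoptic checkpoints expect COCO panoptic annotations, detection checkpoints plain COCO detection format (selected via the flag above)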
a =ConditionalDetrImageProcessor(format=lowercase )
# prepare image
a =prepare_img()
a =image_processor(images=lowercase , return_tensors='''pt''' )
a =encoding['''pixel_values''']
logger.info(f'''Converting model {model_name}...''' )
# load original model from torch hub
a =torch.hub.load('''DeppMeng/ConditionalDETR''' , lowercase , pretrained=lowercase ).eval()
a =conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
a ='''conditional_detr.''' + src
rename_key(lowercase , lowercase , lowercase )
a =rename_backbone_keys(lowercase )
# query, key and value matrices need special treatment
read_in_q_k_v(lowercase , is_panoptic=lowercase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
a ='''conditional_detr.model.''' if is_panoptic else '''model.'''
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('''conditional_detr''' )
and not key.startswith('''class_labels_classifier''' )
and not key.startswith('''bbox_predictor''' )
):
a =state_dict.pop(lowercase )
a =val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
a =state_dict.pop(lowercase )
a =val
elif key.startswith('''bbox_attention''' ) or key.startswith('''mask_head''' ):
continue
else:
a =state_dict.pop(lowercase )
a =val
else:
if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
a =state_dict.pop(lowercase )
a =val
# finally, create HuggingFace model and load state dict
a =ConditionalDetrForSegmentation(lowercase ) if is_panoptic else ConditionalDetrForObjectDetection(lowercase )
model.load_state_dict(lowercase )
model.eval()
model.push_to_hub(repo_id=lowercase , organization='''DepuMeng''' , commit_message='''Add model''' )
# verify our conversion
a =conditional_detr(lowercase )
a =model(lowercase )
assert torch.allclose(outputs.logits , original_outputs['''pred_logits'''] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs['''pred_boxes'''] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['''pred_masks'''] , atol=1E-4 )
# Save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(lowercase ).mkdir(exist_ok=lowercase )
model.save_pretrained(lowercase )
image_processor.save_pretrained(lowercase )
if __name__ == "__main__":
lowerCamelCase_ : Tuple = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""conditional_detr_resnet50""",
type=str,
help="""Name of the CONDITIONAL_DETR model you\'d like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
lowerCamelCase_ : str = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path) | 81 | import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowercase = logging.get_logger(__name__)
__lowercase = {
'''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
a__ : List[str] = """deformable_detr"""
a__ : Union[str, Any] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self , __lowercase=True , __lowercase=None , __lowercase=3 , __lowercase=300 , __lowercase=1_024 , __lowercase=6 , __lowercase=1_024 , __lowercase=8 , __lowercase=6 , __lowercase=1_024 , __lowercase=8 , __lowercase=0.0 , __lowercase=True , __lowercase="relu" , __lowercase=256 , __lowercase=0.1 , __lowercase=0.0 , __lowercase=0.0 , __lowercase=0.02 , __lowercase=1.0 , __lowercase=True , __lowercase=False , __lowercase="sine" , __lowercase="resnet50" , __lowercase=True , __lowercase=False , __lowercase=4 , __lowercase=4 , __lowercase=4 , __lowercase=False , __lowercase=300 , __lowercase=False , __lowercase=1 , __lowercase=5 , __lowercase=2 , __lowercase=1 , __lowercase=1 , __lowercase=5 , __lowercase=2 , __lowercase=0.1 , __lowercase=0.25 , __lowercase=False , **__lowercase , ) -> int:
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''')
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''')
__UpperCamelCase :str = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''])
elif isinstance(__lowercase , dict):
__UpperCamelCase :str = backbone_config.get('''model_type''')
__UpperCamelCase :Tuple = CONFIG_MAPPING[backbone_model_type]
__UpperCamelCase :Any = config_class.from_dict(__lowercase)
__UpperCamelCase :int = use_timm_backbone
__UpperCamelCase :Dict = backbone_config
__UpperCamelCase :Any = num_channels
__UpperCamelCase :Optional[int] = num_queries
__UpperCamelCase :Any = max_position_embeddings
__UpperCamelCase :str = d_model
__UpperCamelCase :Tuple = encoder_ffn_dim
__UpperCamelCase :Union[str, Any] = encoder_layers
__UpperCamelCase :List[Any] = encoder_attention_heads
__UpperCamelCase :Any = decoder_ffn_dim
__UpperCamelCase :List[str] = decoder_layers
__UpperCamelCase :int = decoder_attention_heads
__UpperCamelCase :str = dropout
__UpperCamelCase :Any = attention_dropout
__UpperCamelCase :int = activation_dropout
__UpperCamelCase :List[Any] = activation_function
__UpperCamelCase :List[Any] = init_std
__UpperCamelCase :List[Any] = init_xavier_std
__UpperCamelCase :int = encoder_layerdrop
__UpperCamelCase :str = auxiliary_loss
__UpperCamelCase :Optional[Any] = position_embedding_type
__UpperCamelCase :Union[str, Any] = backbone
__UpperCamelCase :Any = use_pretrained_backbone
__UpperCamelCase :str = dilation
# deformable attributes
__UpperCamelCase :Optional[Any] = num_feature_levels
__UpperCamelCase :str = encoder_n_points
__UpperCamelCase :int = decoder_n_points
__UpperCamelCase :Union[str, Any] = two_stage
__UpperCamelCase :Optional[Any] = two_stage_num_proposals
__UpperCamelCase :Dict = with_box_refine
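# in the two-stage variant the encoder proposes regions that seed the decoder queries, so iterative box refinement is required as well (enforced below)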
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''')
# Hungarian matcher
__UpperCamelCase :Optional[int] = class_cost
__UpperCamelCase :List[Any] = bbox_cost
__UpperCamelCase :str = giou_cost
# Loss coefficients
__UpperCamelCase :Tuple = mask_loss_coefficient
__UpperCamelCase :Tuple = dice_loss_coefficient
__UpperCamelCase :int = bbox_loss_coefficient
__UpperCamelCase :Any = giou_loss_coefficient
__UpperCamelCase :Dict = eos_coefficient
__UpperCamelCase :Optional[Any] = focal_alpha
__UpperCamelCase :Optional[Any] = disable_custom_kernels
super().__init__(is_encoder_decoder=__lowercase , **__lowercase)
@property
def UpperCamelCase__ ( self) -> int:
return self.encoder_attention_heads
@property
def UpperCamelCase__ ( self) -> int:
return self.d_model
def UpperCamelCase__ ( self) -> List[Any]:
__UpperCamelCase :Dict = copy.deepcopy(self.__dict__)
if self.backbone_config is not None:
__UpperCamelCase :Tuple = self.backbone_config.to_dict()
__UpperCamelCase :List[Any] = self.__class__.model_type
return output
| 43 | 0 |
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
os.environ['XLA_PYTHON_CLIENT_ALLOCATOR'] = 'platform'
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class __a :
__snake_case : Optional[Any] = PegasusConfig
__snake_case : List[str] = {}
__snake_case : Dict = """gelu"""
def __init__( self : Union[str, Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[int]=13 , UpperCAmelCase : Optional[int]=7 , UpperCAmelCase : Dict=True , UpperCAmelCase : Tuple=False , UpperCAmelCase : Optional[int]=99 , UpperCAmelCase : str=32 , UpperCAmelCase : Any=5 , UpperCAmelCase : List[str]=4 , UpperCAmelCase : Union[str, Any]=37 , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : Dict=0.1 , UpperCAmelCase : str=20 , UpperCAmelCase : Tuple=2 , UpperCAmelCase : str=1 , UpperCAmelCase : Union[str, Any]=0 , ):
lowerCAmelCase_ : Optional[Any] = parent
lowerCAmelCase_ : Any = batch_size
lowerCAmelCase_ : Dict = seq_length
lowerCAmelCase_ : Union[str, Any] = is_training
lowerCAmelCase_ : Any = use_labels
lowerCAmelCase_ : Optional[int] = vocab_size
lowerCAmelCase_ : Dict = hidden_size
lowerCAmelCase_ : str = num_hidden_layers
lowerCAmelCase_ : int = num_attention_heads
lowerCAmelCase_ : Any = intermediate_size
lowerCAmelCase_ : Optional[int] = hidden_dropout_prob
lowerCAmelCase_ : Optional[int] = attention_probs_dropout_prob
lowerCAmelCase_ : Optional[Any] = max_position_embeddings
lowerCAmelCase_ : Optional[Any] = eos_token_id
lowerCAmelCase_ : List[Any] = pad_token_id
lowerCAmelCase_ : Any = bos_token_id
def A ( self : int ):
lowerCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
lowerCAmelCase_ : List[str] = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
lowerCAmelCase_ : List[Any] = np.concatenate([input_ids, eos_tensor] , axis=1 )
lowerCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase_ : Union[str, Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
lowerCAmelCase_ : List[str] = prepare_pegasus_inputs_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
return config, inputs_dict
def A ( self : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : Dict , UpperCAmelCase : Dict ):
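# sanity check for cached (incremental) decoding: decode step by step with a key/value cache and compare against a single full forward pass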
lowerCAmelCase_ : Optional[Any] = 20
lowerCAmelCase_ : int = model_class_name(UpperCAmelCase )
lowerCAmelCase_ : str = model.encode(inputs_dict["""input_ids"""] )
lowerCAmelCase_ , lowerCAmelCase_ : Any = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
lowerCAmelCase_ : Tuple = model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase , UpperCAmelCase )
lowerCAmelCase_ : Optional[int] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
lowerCAmelCase_ : Optional[int] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCAmelCase_ : List[str] = model.decode(
decoder_input_ids[:, :-1] , UpperCAmelCase , decoder_attention_mask=UpperCAmelCase , past_key_values=UpperCAmelCase , decoder_position_ids=UpperCAmelCase , )
lowerCAmelCase_ : Dict = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
lowerCAmelCase_ : List[str] = model.decode(
decoder_input_ids[:, -1:] , UpperCAmelCase , decoder_attention_mask=UpperCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCAmelCase , )
lowerCAmelCase_ : Union[str, Any] = model.decode(UpperCAmelCase , UpperCAmelCase )
lowerCAmelCase_ : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F'Max diff is {diff}' )
def A ( self : List[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : List[str] ):
lowerCAmelCase_ : Tuple = 20
lowerCAmelCase_ : Union[str, Any] = model_class_name(UpperCAmelCase )
lowerCAmelCase_ : List[str] = model.encode(inputs_dict["""input_ids"""] )
lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
lowerCAmelCase_ : Optional[int] = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
lowerCAmelCase_ : List[Any] = model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase , UpperCAmelCase )
lowerCAmelCase_ : str = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCAmelCase_ : Optional[Any] = model.decode(
decoder_input_ids[:, :-1] , UpperCAmelCase , decoder_attention_mask=UpperCAmelCase , past_key_values=UpperCAmelCase , decoder_position_ids=UpperCAmelCase , )
lowerCAmelCase_ : List[Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
lowerCAmelCase_ : Tuple = model.decode(
decoder_input_ids[:, -1:] , UpperCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCAmelCase , decoder_position_ids=UpperCAmelCase , )
lowerCAmelCase_ : str = model.decode(UpperCAmelCase , UpperCAmelCase , decoder_attention_mask=UpperCAmelCase )
lowerCAmelCase_ : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F'Max diff is {diff}' )
def __UpperCamelCase ( lowercase__ : List[str] , lowercase__ : Any , lowercase__ : Any , lowercase__ : Union[str, Any]=None , lowercase__ : Any=None , ) -> List[str]:
'''simple docstring'''
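# build default masks: the encoder mask zeroes out pad tokens, while the decoder mask always keeps position 0 attendable and masks pads elsewhere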
if attention_mask is None:
lowerCAmelCase_ : List[Any] = np.not_equal(lowercase__ , config.pad_token_id ).astype(np.int8 )
if decoder_attention_mask is None:
lowerCAmelCase_ : Tuple = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape , dtype=np.int8 ),
np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.int8 ),
] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class __a ( __UpperCamelCase ,unittest.TestCase ):
__snake_case : Tuple = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
__snake_case : List[Any] = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
__snake_case : List[str] = True
__snake_case : Tuple = False
__snake_case : List[Any] = False
__snake_case : str = False
def A ( self : Dict ):
lowerCAmelCase_ : Union[str, Any] = FlaxPegasusModelTester(self )
lowerCAmelCase_ : Union[str, Any] = ConfigTester(self , config_class=UpperCAmelCase )
def A ( self : Optional[int] ):
self.config_tester.run_common_tests()
def A ( self : str ):
lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def A ( self : Any ):
lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def A ( self : Optional[int] ):
lowerCAmelCase_ , lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCAmelCase_ : Optional[Any] = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
lowerCAmelCase_ : List[str] = model_class(UpperCAmelCase )
@jax.jit
def encode_jitted(UpperCAmelCase : str , UpperCAmelCase : Optional[Any]=None , **UpperCAmelCase : Union[str, Any] ):
return model.encode(input_ids=UpperCAmelCase , attention_mask=UpperCAmelCase )
with self.subTest("""JIT Enabled""" ):
lowerCAmelCase_ : List[str] = encode_jitted(**UpperCAmelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
lowerCAmelCase_ : Optional[int] = encode_jitted(**UpperCAmelCase ).to_tuple()
self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) )
for jitted_output, output in zip(UpperCAmelCase , UpperCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def A ( self : int ):
lowerCAmelCase_ , lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCAmelCase_ : Union[str, Any] = model_class(UpperCAmelCase )
lowerCAmelCase_ : Optional[int] = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] )
lowerCAmelCase_ : Union[str, Any] = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(UpperCAmelCase : str , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple ):
return model.decode(
decoder_input_ids=UpperCAmelCase , decoder_attention_mask=UpperCAmelCase , encoder_outputs=UpperCAmelCase , )
with self.subTest("""JIT Enabled""" ):
lowerCAmelCase_ : Union[str, Any] = decode_jitted(**UpperCAmelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
lowerCAmelCase_ : Tuple = decode_jitted(**UpperCAmelCase ).to_tuple()
self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) )
for jitted_output, output in zip(UpperCAmelCase , UpperCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def A ( self : List[str] ):
for model_class_name in self.all_model_classes:
lowerCAmelCase_ : Dict = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=UpperCAmelCase )
lowerCAmelCase_ : List[str] = np.ones((1, 1) )
lowerCAmelCase_ : Any = model(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
@slow
def A ( self : int ):
lowerCAmelCase_ : Tuple = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" )
lowerCAmelCase_ : Optional[Any] = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" )
lowerCAmelCase_ : str = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
lowerCAmelCase_ : int = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
lowerCAmelCase_ : Any = tokenizer(UpperCAmelCase , return_tensors="""np""" , truncation=UpperCAmelCase , max_length=5_12 , padding=UpperCAmelCase )
lowerCAmelCase_ : Union[str, Any] = model.generate(**UpperCAmelCase , num_beams=2 ).sequences
lowerCAmelCase_ : List[str] = tokenizer.batch_decode(UpperCAmelCase , skip_special_tokens=UpperCAmelCase )
assert tgt_text == decoded
| 28 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def __UpperCamelCase ( lowercase__ : Optional[Any] , lowercase__ : str=False ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ : int = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'blocks.{i}.norm1.weight', f'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'blocks.{i}.norm1.bias', f'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((f'blocks.{i}.attn.proj.weight', f'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((f'blocks.{i}.attn.proj.bias', f'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'blocks.{i}.norm2.weight', f'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'blocks.{i}.norm2.bias', f'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowerCAmelCase_ : int = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def __UpperCamelCase ( lowercase__ : int , lowercase__ : Dict , lowercase__ : Optional[Any]=False ) -> Optional[Any]:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
lowerCAmelCase_ : int = """"""
else:
lowerCAmelCase_ : Union[str, Any] = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCAmelCase_ : str = state_dict.pop(f'blocks.{i}.attn.qkv.weight' )
lowerCAmelCase_ : Any = state_dict.pop(f'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
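# the fused qkv projection is split row-wise into three hidden_size-sized chunks: query, key, value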
lowerCAmelCase_ : Dict = in_proj_weight[
: config.hidden_size, :
]
lowerCAmelCase_ : int = in_proj_bias[: config.hidden_size]
lowerCAmelCase_ : Tuple = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCAmelCase_ : int = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCAmelCase_ : Optional[Any] = in_proj_weight[
-config.hidden_size :, :
]
lowerCAmelCase_ : Dict = in_proj_bias[-config.hidden_size :]
def __UpperCamelCase ( lowercase__ : Any ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : Any = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(lowercase__ , lowercase__ )
def __UpperCamelCase ( lowercase__ : List[str] , lowercase__ : List[str] , lowercase__ : Optional[Any] ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : Dict = dct.pop(lowercase__ )
lowerCAmelCase_ : List[Any] = val
def __UpperCamelCase ( ) -> str:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCAmelCase_ : List[str] = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw )
return im
@torch.no_grad()
def __UpperCamelCase ( lowercase__ : Optional[Any] , lowercase__ : Any , lowercase__ : Any=True ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = ViTConfig()
# patch_size
if model_name[-1] == "8":
lowerCAmelCase_ : Dict = 8
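# model names ending in "8" (e.g. dino_vits8) use an 8x8 patch size instead of the ViTConfig default of 16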
# set labels if required
if not base_model:
lowerCAmelCase_ : str = 1000
lowerCAmelCase_ : List[Any] = """huggingface/label-files"""
lowerCAmelCase_ : Optional[int] = """imagenet-1k-id2label.json"""
lowerCAmelCase_ : str = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type="""dataset""" ) , """r""" ) )
lowerCAmelCase_ : List[str] = {int(k): v for k, v in idalabel.items()}
lowerCAmelCase_ : Any = idalabel
lowerCAmelCase_ : Union[str, Any] = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
lowerCAmelCase_ : Union[str, Any] = 384
lowerCAmelCase_ : Any = 1536
lowerCAmelCase_ : Union[str, Any] = 12
lowerCAmelCase_ : str = 6
# load original model from torch hub
lowerCAmelCase_ : Any = torch.hub.load("""facebookresearch/dino:main""" , lowercase__ )
original_model.eval()
# load state_dict of original model, remove and rename some keys
lowerCAmelCase_ : Any = original_model.state_dict()
if base_model:
remove_classification_head_(lowercase__ )
lowerCAmelCase_ : Dict = create_rename_keys(lowercase__ , base_model=lowercase__ )
for src, dest in rename_keys:
rename_key(lowercase__ , lowercase__ , lowercase__ )
read_in_q_k_v(lowercase__ , lowercase__ , lowercase__ )
# load HuggingFace model
if base_model:
lowerCAmelCase_ : int = ViTModel(lowercase__ , add_pooling_layer=lowercase__ ).eval()
else:
lowerCAmelCase_ : Union[str, Any] = ViTForImageClassification(lowercase__ ).eval()
model.load_state_dict(lowercase__ )
# Check outputs on an image, prepared by ViTImageProcessor
lowerCAmelCase_ : List[str] = ViTImageProcessor()
lowerCAmelCase_ : List[Any] = image_processor(images=prepare_img() , return_tensors="""pt""" )
lowerCAmelCase_ : List[str] = encoding["""pixel_values"""]
lowerCAmelCase_ : Optional[int] = model(lowercase__ )
if base_model:
lowerCAmelCase_ : Union[str, Any] = original_model(lowercase__ )
assert torch.allclose(lowercase__ , outputs.last_hidden_state[:, 0, :] , atol=1E-1 )
else:
lowerCAmelCase_ : int = original_model(lowercase__ )
assert logits.shape == outputs.logits.shape
assert torch.allclose(lowercase__ , outputs.logits , atol=1E-3 )
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(lowercase__ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(lowercase__ )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
__UpperCAmelCase = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 28 | 1 |
from collections import namedtuple
import requests
from lxml import html # type: ignore
__UpperCAmelCase : Tuple = namedtuple("covid_data", "cases deaths recovered")
def A__ ( SCREAMING_SNAKE_CASE__ = "https://www.worldometers.info/coronavirus/") -> covid_data:
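# worldometers renders the headline counters (cases, deaths, recovered, in that order) as "maincounter-number" spans, matched by the XPath below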
__snake_case: Optional[int] = """//div[@class = \"maincounter-number\"]/span/text()"""
return covid_data(*html.fromstring(requests.get(SCREAMING_SNAKE_CASE__).content).xpath(SCREAMING_SNAKE_CASE__))
__UpperCAmelCase : Any = "Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}"
print(fmt.format(*covid_stats()))
| 111 |
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
__UpperCAmelCase : Union[str, Any] = logging.getLogger(__name__)
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = """token-classification"""
def __init__( self : Union[str, Any] , A : List[Any] ):
if type(A ) == dict:
__snake_case: str = Namespace(**A )
__snake_case: str = import_module("""tasks""" )
try:
__snake_case: Tuple = getattr(A , hparams.task_type )
__snake_case: TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f'''Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
f'''Available task classes are: {TokenClassificationTask.__subclasses__()}''' )
__snake_case: Optional[int] = self.token_classification_task.get_labels(hparams.labels )
__snake_case: str = CrossEntropyLoss().ignore_index
super().__init__(A , len(self.labels ) , self.mode )
def UpperCAmelCase__ ( self : Union[str, Any] , **A : Union[str, Any] ):
return self.model(**A )
def UpperCAmelCase__ ( self : Any , A : Optional[int] , A : str ):
__snake_case: str = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type != "distilbert":
__snake_case: List[str] = (
batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None
) # XLM and RoBERTa don"t use token_type_ids
__snake_case: Any = self(**A )
__snake_case: Any = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def UpperCAmelCase__ ( self : List[str] ):
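# prepare_data hook: tokenize each split once and cache the features to disk so the dataloaders can reload them quickly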
__snake_case: Tuple = self.hparams
for mode in ["train", "dev", "test"]:
__snake_case: Union[str, Any] = self._feature_file(A )
if os.path.exists(A ) and not args.overwrite_cache:
logger.info("""Loading features from cached file %s""" , A )
__snake_case: Tuple = torch.load(A )
else:
logger.info("""Creating features from dataset file at %s""" , args.data_dir )
__snake_case: List[Any] = self.token_classification_task.read_examples_from_file(args.data_dir , A )
__snake_case: Optional[int] = self.token_classification_task.convert_examples_to_features(
A , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["""xlnet"""] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["""xlnet"""] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=A , pad_on_left=bool(self.config.model_type in ["""xlnet"""] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info("""Saving features into cached file %s""" , A )
torch.save(A , A )
def UpperCAmelCase__ ( self : List[str] , A : int , A : int , A : bool = False ):
__snake_case: List[str] = self._feature_file(A )
logger.info("""Loading features from cached file %s""" , A )
__snake_case: int = torch.load(A )
__snake_case: Optional[int] = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
__snake_case: Optional[Any] = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
__snake_case: Dict = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
__snake_case: Tuple = torch.tensor([0 for f in features] , dtype=torch.long )
# HACK(we will not use this anymore soon)
__snake_case: Optional[int] = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(A , A , A , A ) , batch_size=A )
def UpperCAmelCase__ ( self : Tuple , A : Optional[Any] , A : int ):
"""Compute validation""" ""
__snake_case: List[str] = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type != "distilbert":
__snake_case: Union[str, Any] = (
batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None
) # XLM and RoBERTa don"t use token_type_ids
__snake_case: List[str] = self(**A )
__snake_case , __snake_case: int = outputs[:2]
__snake_case: List[str] = logits.detach().cpu().numpy()
__snake_case: Any = inputs["""labels"""].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def UpperCAmelCase__ ( self : Dict , A : Dict ):
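# aggregate batch outputs, then align predictions with labels, skipping positions marked with the pad label id before computing seqeval metrics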
__snake_case: Union[str, Any] = torch.stack([x["""val_loss"""] for x in outputs] ).mean()
__snake_case: Tuple = np.concatenate([x["""pred"""] for x in outputs] , axis=0 )
__snake_case: List[str] = np.argmax(A , axis=2 )
__snake_case: Optional[Any] = np.concatenate([x["""target"""] for x in outputs] , axis=0 )
__snake_case: Tuple = dict(enumerate(self.labels ) )
__snake_case: Dict = [[] for _ in range(out_label_ids.shape[0] )]
__snake_case: Tuple = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
__snake_case: Dict = {
"""val_loss""": val_loss_mean,
"""accuracy_score""": accuracy_score(A , A ),
"""precision""": precision_score(A , A ),
"""recall""": recall_score(A , A ),
"""f1""": fa_score(A , A ),
}
__snake_case: Dict = dict(results.items() )
__snake_case: Dict = results
return ret, preds_list, out_label_list
def UpperCAmelCase__ ( self : Tuple , A : int ):
# when stable
__snake_case , __snake_case , __snake_case: List[str] = self._eval_end(A )
__snake_case: int = ret["""log"""]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def UpperCAmelCase__ ( self : Dict , A : Optional[int] ):
# updating to test_epoch_end instead of deprecated test_end
__snake_case , __snake_case , __snake_case: int = self._eval_end(A )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
__snake_case: List[Any] = ret["""log"""]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def UpperCAmelCase__ ( A : Any , A : int ):
# Add NER specific options
BaseTransformer.add_model_specific_args(A , A )
parser.add_argument(
"""--task_type""" , default="""NER""" , type=A , help="""Task type to fine tune in training (e.g. NER, POS, etc)""" )
parser.add_argument(
"""--max_seq_length""" , default=128 , type=A , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--labels""" , default="""""" , type=A , help="""Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.""" , )
parser.add_argument(
"""--gpus""" , default=0 , type=A , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , )
parser.add_argument(
"""--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" )
return parser
if __name__ == "__main__":
__UpperCAmelCase : Tuple = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
__UpperCAmelCase : Tuple = NERTransformer.add_model_specific_args(parser, os.getcwd())
__UpperCAmelCase : List[str] = parser.parse_args()
__UpperCAmelCase : Optional[int] = NERTransformer(args)
__UpperCAmelCase : Tuple = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
__UpperCAmelCase : int = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
__UpperCAmelCase : Any = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
| 111 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
_A : Optional[Any] = logging.getLogger(__name__)
@dataclass
class __SCREAMING_SNAKE_CASE :
_UpperCAmelCase : Optional[int] = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
_UpperCAmelCase : Tuple = field(
default=lowerCAmelCase_ ,metadata={"help": "Pretrained config name or path if not the same as model_name"} )
_UpperCAmelCase : Optional[int] = field(
default=lowerCAmelCase_ ,metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
_UpperCAmelCase : List[str] = field(
default=lowerCAmelCase_ ,metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} ,)
_UpperCAmelCase : Tuple = field(
default=lowerCAmelCase_ ,metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} ,)
_UpperCAmelCase : Dict = field(
default="main" ,metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} ,)
_UpperCAmelCase : Dict = field(
default=lowerCAmelCase_ ,metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} ,)
@dataclass
class __SCREAMING_SNAKE_CASE :
_UpperCAmelCase : Dict = field(default=lowerCAmelCase_ ,metadata={"help": "The input training data file (a text file)."} )
_UpperCAmelCase : Optional[Any] = field(
default=lowerCAmelCase_ ,metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} ,)
_UpperCAmelCase : Any = field(
default=lowerCAmelCase_ ,metadata={"help": "Overwrite the cached training and evaluation sets"} )
_UpperCAmelCase : Optional[int] = field(
default=lowerCAmelCase_ ,metadata={"help": "The number of processes to use for the preprocessing."} ,)
_UpperCAmelCase : int = field(
default=lowerCAmelCase_ ,metadata={
"help": (
"The maximum total input sequence length after tokenization. If passed, sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} ,)
_UpperCAmelCase : List[Any] = field(
default=lowerCAmelCase_ ,metadata={
"help": (
"Whether to pad all samples to the maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
} ,)
_UpperCAmelCase : Optional[int] = field(
default=lowerCAmelCase_ ,metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} ,)
_UpperCAmelCase : Optional[int] = field(
default=lowerCAmelCase_ ,metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} ,)
def __lowerCamelCase ( self : Optional[int] ) ->List[Any]:
if self.train_file is not None:
lowerCamelCase__ : List[Any] = self.train_file.split('''.''' )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
lowerCamelCase__ : Optional[int] = self.validation_file.split('''.''' )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class __SCREAMING_SNAKE_CASE :
tokenizer : PreTrainedTokenizerBase
padding : Union[bool, str, PaddingStrategy] = True
max_length : Optional[int] = None
pad_to_multiple_of : Optional[int] = None
def __call__( self : Union[str, Any] , A : Any ) ->Optional[int]:
lowerCamelCase__ : Dict = '''label''' if '''label''' in features[0].keys() else '''labels'''
lowerCamelCase__ : Optional[Any] = [feature.pop(__A ) for feature in features]
lowerCamelCase__ : str = len(__A )
lowerCamelCase__ : Any = len(features[0]['''input_ids'''] )
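# flatten the (batch_size, num_choices) structure so the tokenizer can pad every choice to a common length in one call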
lowerCamelCase__ : List[str] = [
[{k: v[i] for k, v in feature.items()} for i in range(__A )] for feature in features
]
lowerCamelCase__ : Any = list(chain(*__A ) )
lowerCamelCase__ : List[str] = self.tokenizer.pad(
__A , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
# Un-flatten
lowerCamelCase__ : Dict = {k: v.view(__A , __A , -1 ) for k, v in batch.items()}
# Add back labels
lowerCamelCase__ : Dict = torch.tensor(__A , dtype=torch.int64 )
return batch
def _a ( ) -> Any:
"""simple docstring"""
lowerCamelCase__ : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_swag''' , _lowercase , _lowercase )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowerCamelCase__ : str = training_args.get_process_log_level()
logger.setLevel(_lowercase )
datasets.utils.logging.set_verbosity(_lowercase )
transformers.utils.logging.set_verbosity(_lowercase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
lowerCamelCase__ : int = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCamelCase__ : Dict = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
lowerCamelCase__ : Optional[int] = {}
if data_args.train_file is not None:
lowerCamelCase__ : Optional[Any] = data_args.train_file
if data_args.validation_file is not None:
lowerCamelCase__ : Optional[Any] = data_args.validation_file
lowerCamelCase__ : Tuple = data_args.train_file.split('''.''' )[-1]
lowerCamelCase__ : Union[str, Any] = load_dataset(
_lowercase , data_files=_lowercase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
lowerCamelCase__ : List[str] = load_dataset(
'''swag''' , '''regular''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCamelCase__ : Optional[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase__ : Any = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase__ : Any = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
lowerCamelCase__ : List[Any] = [f"ending{i}" for i in range(4 )]
lowerCamelCase__ : Union[str, Any] = '''sent1'''
lowerCamelCase__ : Optional[int] = '''sent2'''
if data_args.max_seq_length is None:
lowerCamelCase__ : Optional[int] = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
'''The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'''
''' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'''
''' override this default with `--block_size xxx`.''' )
lowerCamelCase__ : int = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
lowerCamelCase__ : List[Any] = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(UpperCAmelCase ):
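# each SWAG example expands into 4 (context, ending) candidate pairs; these are flattened for tokenization and re-grouped into (batch, 4, seq_len) afterwards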
lowerCamelCase__ : Optional[int] = [[context] * 4 for context in examples[context_name]]
lowerCamelCase__ : Optional[Any] = examples[question_header_name]
lowerCamelCase__ : Tuple = [
[f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(_lowercase )
]
# Flatten out
lowerCamelCase__ : Optional[int] = list(chain(*_lowercase ) )
lowerCamelCase__ : str = list(chain(*_lowercase ) )
# Tokenize
lowerCamelCase__ : int = tokenizer(
_lowercase , _lowercase , truncation=_lowercase , max_length=_lowercase , padding='''max_length''' if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(_lowercase ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
lowerCamelCase__ : Union[str, Any] = raw_datasets['''train''']
if data_args.max_train_samples is not None:
lowerCamelCase__ : Union[str, Any] = min(len(_lowercase ) , data_args.max_train_samples )
lowerCamelCase__ : List[str] = train_dataset.select(range(_lowercase ) )
with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
lowerCamelCase__ : Optional[Any] = train_dataset.map(
_lowercase , batched=_lowercase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
lowerCamelCase__ : Any = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
lowerCamelCase__ : int = min(len(_lowercase ) , data_args.max_eval_samples )
lowerCamelCase__ : str = eval_dataset.select(range(_lowercase ) )
with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
lowerCamelCase__ : int = eval_dataset.map(
_lowercase , batched=_lowercase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
lowerCamelCase__ : int = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=_lowercase , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(UpperCAmelCase ):
lowerCamelCase__ , lowerCamelCase__ : int = eval_predictions
lowerCamelCase__ : List[str] = np.argmax(_lowercase , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
lowerCamelCase__ : List[Any] = Trainer(
model=_lowercase , args=_lowercase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=_lowercase , data_collator=_lowercase , compute_metrics=_lowercase , )
# Training
if training_args.do_train:
lowerCamelCase__ : List[str] = None
if training_args.resume_from_checkpoint is not None:
lowerCamelCase__ : Union[str, Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCamelCase__ : Optional[Any] = last_checkpoint
lowerCamelCase__ : List[Any] = trainer.train(resume_from_checkpoint=_lowercase )
trainer.save_model() # Saves the tokenizer too for easy upload
lowerCamelCase__ : int = train_result.metrics
lowerCamelCase__ : Optional[int] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_lowercase )
)
lowerCamelCase__ : Optional[Any] = min(_lowercase , len(_lowercase ) )
trainer.log_metrics('''train''' , _lowercase )
trainer.save_metrics('''train''' , _lowercase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowerCamelCase__ : str = trainer.evaluate()
lowerCamelCase__ : List[Any] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_lowercase )
lowerCamelCase__ : Any = min(_lowercase , len(_lowercase ) )
trainer.log_metrics('''eval''' , _lowercase )
trainer.save_metrics('''eval''' , _lowercase )
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }

    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs): each spawned process calls main().
    main()
if __name__ == "__main__":
main()
| 359 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    """Emit FutureWarnings for deprecated (attribute, version, message) tuples and pop deprecated kwargs/attributes."""
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}")

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
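

# Minimal usage sketch (hypothetical caller, not part of this module): pop a renamed
# kwarg while warning until the stated removal version.
#
#   def run(new_arg=None, **kwargs):
#       old_arg = deprecate(
#           ("old_arg", "999.0.0", "Use `new_arg` instead."),
#           take_from=kwargs,
#       )
#       return new_arg if new_arg is not None else old_arg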
| 265 | 0 |
'''simple docstring'''
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)

        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)

        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--tf_checkpoint_path',
default='',
type=str,
help='An optional path to a TensorFlow checkpoint path to be converted.',
)
parser.add_argument(
'--transfo_xl_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--transfo_xl_dataset_file',
default='',
type=str,
help='An optional dataset file to be converted in a vocabulary.',
)
    args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
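
# Hypothetical invocation sketch (all paths are placeholders, not shipped files):
#   --pytorch_dump_folder_path /tmp/transfo-xl-pt        (required output directory)
#   --tf_checkpoint_path /tmp/transfo-xl/model.ckpt      (optional TF weights to convert)
#   --transfo_xl_config_file /tmp/transfo-xl/config.json (optional; default config if omitted)
#   --transfo_xl_dataset_file /tmp/transfo-xl/corpus.pkl (optional pickled corpus to convert)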
| 1 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}
class ConditionalDetrConfig(PretrainedConfig):
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=300, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8, encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, class_cost=2, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, cls_loss_coefficient=2, bbox_loss_coefficient=5, giou_loss_coefficient=2, focal_alpha=0.25, **kwargs):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
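
# Minimal usage sketch (defaults as defined above; purely illustrative):
#   config = ConditionalDetrConfig()
#   assert config.num_queries == 300 and config.hidden_size == 256  # hidden_size -> d_model
#   serialized = config.to_dict()  # includes "model_type": "conditional_detr"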
class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 189 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
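
# Note: with the lazy module installed in sys.modules, an import such as
# `from transformers.models.llama import LlamaModel` defers loading the heavy
# torch-backed submodule until the attribute is first accessed; only names
# registered in `_import_structure` above are resolvable.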
| 123 |
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **kwargs):
        forward_kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = forward_kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **forward_kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **forward_kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **forward_kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **forward_kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps, torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]), )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
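
# Background note grounded in full_loop above: PNDM sampling runs two phases that
# the tests exercise separately -- a Runge-Kutta warm-up over scheduler.prk_timesteps
# (step_prk) followed by linear multistep updates over scheduler.plms_timesteps
# (step_plms).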
| 123 | 1 |
'''simple docstring'''
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight
def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(arr, max_cost, key_func):
    items_copy = sorted(arr, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
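
# Minimal usage sketch (hypothetical menu): pack items greedily by value density.
#   menu = build_menu(["burger", "salad", "soda"], value=[80, 50, 20], weight=[40, 60, 10])
#   chosen, total_value = greedy(menu, max_cost=100, key_func=Things.value_weight)
#   # items are sorted by value/weight and added while their total weight fits max_cost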
| 83 |
'''simple docstring'''
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/xprophetnet-large-wiki100-cased": (
        "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
    ),
}
class XLMProphetNetConfig(PretrainedConfig):
    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }
    def __init__(self, activation_dropout: Optional[float] = 0.1, activation_function: Optional[Union[str, Callable]] = "gelu", vocab_size: Optional[int] = 30522, hidden_size: Optional[int] = 1024, encoder_ffn_dim: Optional[int] = 4096, num_encoder_layers: Optional[int] = 12, num_encoder_attention_heads: Optional[int] = 16, decoder_ffn_dim: Optional[int] = 4096, num_decoder_layers: Optional[int] = 12, num_decoder_attention_heads: Optional[int] = 16, attention_dropout: Optional[float] = 0.1, dropout: Optional[float] = 0.1, max_position_embeddings: Optional[int] = 512, init_std: Optional[float] = 0.02, is_encoder_decoder: Optional[bool] = True, add_cross_attention: Optional[bool] = True, decoder_start_token_id: Optional[int] = 0, ngram: Optional[int] = 2, num_buckets: Optional[int] = 32, relative_max_distance: Optional[int] = 128, disable_ngram_loss: Optional[bool] = False, eps: Optional[float] = 0.0, use_cache: Optional[bool] = True, pad_token_id: Optional[int] = 0, bos_token_id: Optional[int] = 1, eos_token_id: Optional[int] = 2, **kwargs):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function

        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps

        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout

        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, add_cross_attention=add_cross_attention, decoder_start_token_id=decoder_start_token_id, **kwargs, )
    @property
    def num_hidden_layers(self) -> int:
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
            " `num_decoder_layers`.")
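
# Minimal usage sketch: with the defaults above, XLMProphetNetConfig().num_hidden_layers
# is num_encoder_layers + num_decoder_layers == 12 + 12 == 24, and assigning to it
# raises NotImplementedError.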
| 83 | 1 |
"""simple docstring"""
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []

    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt
def pack_data_dir(tok, data_dir, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")
def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
if __name__ == "__main__":
packer_cli()
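
# Hypothetical CLI sketch (model name and paths are placeholders):
#   python pack_dataset.py --tok_name facebook/bart-large-cnn \
#       --max_seq_len 1024 --data_dir ./cnn_dm --save_path ./cnn_dm_packed
# Packing concatenates consecutive train examples until the tokenized length would
# exceed --max_seq_len; val/test files are copied through unchanged.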
| 255 |
"""simple docstring"""
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
"iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
"iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
"iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
"mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
"mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
"mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
"mask_downscaling.0": "mask_embed.conv1",
"mask_downscaling.1": "mask_embed.layer_norm1",
"mask_downscaling.3": "mask_embed.conv2",
"mask_downscaling.4": "mask_embed.layer_norm2",
"mask_downscaling.6": "mask_embed.conv3",
"point_embeddings": "point_embed",
"pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
"image_encoder": "vision_encoder",
"neck.0": "neck.conv1",
"neck.1": "neck.layer_norm1",
"neck.2": "neck.conv2",
"neck.3": "neck.layer_norm2",
"patch_embed.proj": "patch_embed.projection",
".norm": ".layer_norm",
"blocks": "layers",
}
def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)

    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")

        model_state_dict[key] = value

    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]
    return model_state_dict
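
# Example of the mapping above: an original key like
# "image_encoder.blocks.0.norm1.weight" becomes
# "vision_encoder.layers.0.layer_norm1.weight" after replace_keys().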
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")

    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23], )
        config = SamConfig(
            vision_config=vision_config, )
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31], )
        config = SamConfig(
            vision_config=vision_config, )

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)
    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    input_points = [[[400, 650]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")

    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579_890_251_159_668

    inputs = processor(
        images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt").to("cuda")

    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    assert scores[-1].item() == 0.9_712_603_092_193_604

    input_boxes = ((75, 275, 1725, 850),)

    inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")

    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    assert scores[-1].item() == 0.8_686_015_605_926_514

    # Test with 2 points and 1 image.
    input_points = [[[400, 650], [800, 650]]]
    input_labels = [[1, 1]]

    inputs = processor(
        images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt").to("cuda")

    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    assert scores[-1].item() == 0.9_936_047_792_434_692
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
"--model_name",
default="sam_vit_h_4b8939",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
parser.add_argument(
"--model_hub_id",
default="ybelkada/segment-anything",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
    args = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 255 | 1 |
'''simple docstring'''
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
    # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class Data2VecAudioConfig(PretrainedConfig):
    model_type = "data2vec-audio"
    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embedding_groups=16, conv_pos_kernel_size=19, num_conv_pos_embeddings=5, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, ctc_loss_reduction="sum", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, pad_token_id=0, bos_token_id=1, eos_token_id=2, add_adapter=False, adapter_kernel_size=3, adapter_stride=2, num_adapter_layers=3, output_hidden_size=None, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`.")

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim
    @property
    def inputs_to_logits_ratio(self):
        return math.prod(self.conv_stride)
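
# Minimal usage sketch: the default conv strides (5, 2, 2, 2, 2, 2, 2) downsample raw
# audio by prod(strides) == 320, so Data2VecAudioConfig().inputs_to_logits_ratio == 320.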
| 267 |
'''simple docstring'''
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """Divide and conquer: return the largest element of nums[left:right + 1]."""
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
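
# Quick usage sketch: find_max([1, 5, 3, 9, 2], 0, 4) splits the range recursively
# and returns 9; negative indices within +/- len(nums) are also accepted.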
| 267 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-5, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_2d_position_embeddings=1024, coordinate_size=128, shape_size=128, has_relative_attention_bias=True, rel_pos_bins=32, max_rel_pos=128, rel_2d_pos_bins=64, max_rel_2d_pos=256, has_spatial_attention_bias=True, text_embed=True, visual_embed=True, input_size=224, num_channels=3, patch_size=16, classifier_dropout=None, **kwargs):
        super().__init__(
            vocab_size=vocab_size, hidden_size=hidden_size, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads, intermediate_size=intermediate_size, hidden_act=hidden_act, hidden_dropout_prob=hidden_dropout_prob, attention_probs_dropout_prob=attention_probs_dropout_prob, max_position_embeddings=max_position_embeddings, type_vocab_size=type_vocab_size, initializer_range=initializer_range, layer_norm_eps=layer_norm_eps, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs, )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
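
# Minimal usage sketch (defaults as defined above):
#   config = LayoutLMv3Config()
#   assert config.hidden_size == 768 and config.max_2d_position_embeddings == 1024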
class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The order of inputs is different for question answering and sequence classification
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ])
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ])

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(self, processor: "ProcessorMixin", batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None, num_channels: int = 3, image_width: int = 40, image_height: int = 40, ) -> Mapping[str, Any]:
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0)
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add)
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)

        inputs = dict(
            processor(
                dummy_image, text=dummy_text, boxes=dummy_bboxes, return_tensors=framework, ))

        return inputs
| 222 |
'''simple docstring'''
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
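
# Example: get_pairs(("h", "e", "y")) -> {("h", "e"), ("e", "y")}; the BPE loop below
# repeatedly merges the pair with the lowest rank in self.bpe_ranks.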
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, merges_file, bos_token="__start__", eos_token="__end__", unk_token="__unk__", pad_token="__null__", **kwargs):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))

                if bigram not in self.bpe_ranks:
                    break

                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1

                new_word = tuple(new_word)
                word = new_word

                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]

            self.cache[token] = word
            words.append(word)
        return " ".join(words)

    def _tokenize(self, text: str) -> List[str]:
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
| 222 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_blenderbot": [
        "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotConfig",
        "BlenderbotOnnxConfig",
    ],
    "tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
        "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotForCausalLM",
        "BlenderbotForConditionalGeneration",
        "BlenderbotModel",
        "BlenderbotPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
        "TFBlenderbotForConditionalGeneration",
        "TFBlenderbotModel",
        "TFBlenderbotPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
        "FlaxBlenderbotForConditionalGeneration",
        "FlaxBlenderbotModel",
        "FlaxBlenderbotPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 275 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class TestClass(unittest.TestCase):
    def test_sorted(self):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        self.assertRaisesRegex(
            IndexError, "The length of profit and weight must be same.")
if __name__ == "__main__":
unittest.main()
| 200 | 0 |
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    # Doolittle LU decomposition without pivoting; requires a square input.
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
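# A minimal usage sketch (values are illustrative, not asserted here): the
# product of the returned factors should reconstruct the input when no zero
# pivot is encountered.
#     matrix = np.array([[2, -2, 1], [0, 1, 2], [5, 3, 1]])
#     lower, upper = lower_upper_decomposition(matrix)
#     np.allclose(lower @ upper, matrix)  # expected: True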
| 354 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_mvp""": ["""MVP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MvpConfig""", """MvpOnnxConfig"""],
"""tokenization_mvp""": ["""MvpTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = ["""MvpTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mvp"] = [
"""MVP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MvpForCausalLM""",
"""MvpForConditionalGeneration""",
"""MvpForQuestionAnswering""",
"""MvpForSequenceClassification""",
"""MvpModel""",
"""MvpPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 193 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(self, sample, sigma, generator=None):
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def step(self, model_output, sigma_hat, sigma_prev, sample_hat, return_dict=True):
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample)

    def step_correct(self, model_output, sigma_hat, sigma_prev, sample_hat, sample_prev, derivative, return_dict=True):
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample)

    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
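# A minimal usage sketch (illustrative only; `unet` is a hypothetical trained
# UNet2DModel, and the loop mirrors the stochastic KarrasVe sampling pipeline):
#     scheduler = KarrasVeScheduler()
#     scheduler.set_timesteps(num_inference_steps=50)
#     for t in scheduler.timesteps:
#         sigma = scheduler.schedule[t]
#         sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0
#         sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
#         model_output = (sigma_hat / 2) * unet((sample_hat + 1) / 2, sigma_hat / 2).sample
#         step_output = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)
#         sample = step_output.prev_sample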
| 44 | """simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    # Compute the MinHash of a list of tokens; skip very short snippets
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    # Tokenize a code snippet on non-alphanumeric characters
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
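# Example: get_tokens("def foo(): pass") == {"def", "foo", "pass"}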
class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return
        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(code1: str, code2: str) -> float:
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(
    dataset: Type[Dataset], jaccard_threshold: float = 0.85
) -> Tuple[Type[Dataset], List[List[Dict]]]:
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")
    return ds_filter, duplicate_clusters
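# A minimal usage sketch (the dataset name is hypothetical; any
# `datasets.Dataset` with "content", "repo_name" and "path" columns works):
#     from datasets import load_dataset
#     ds = load_dataset("some-org/some-code-dataset", split="train")
#     ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)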
| 44 | 1 |
"""simple docstring"""
from __future__ import annotations
class Matrix:
"""simple docstring"""
    def __init__(self, rows) -> None:
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []
    def columns(self) -> list[list[int]]:
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]

    def identity(self) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)
    def determinant(self) -> int:
'''simple docstring'''
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
    def is_invertable(self) -> bool:
        return bool(self.determinant())

    def get_minor(self, row: int, column: int) -> int:
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )
    def cofactors(self) -> Matrix:
'''simple docstring'''
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
    def adjugate(self) -> Matrix:
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)
def __repr__( self )->str:
'''simple docstring'''
return str(self.rows )
def __str__( self )->str:
'''simple docstring'''
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
                    "[" + ". ".join([str(value) for value in row]) + ".]"
for row in self.rows
] )
+ "]"
)
    def add_row(self, row, position=None) -> None:
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix")
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]
    def add_column(self, column, position=None) -> None:
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats")
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix")
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]
    def __eq__(self, other) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other) -> bool:
        return not self == other

    def __neg__(self) -> Matrix:
        return self * -1

    def __add__(self, other) -> Matrix:
'''simple docstring'''
if self.order != other.order:
raise ValueError('''Addition requires matrices of the same order''' )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
    def __sub__(self, other) -> Matrix:
'''simple docstring'''
if self.order != other.order:
raise ValueError('''Subtraction requires matrices of the same order''' )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
    def __mul__(self, other) -> Matrix:
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows])
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second")
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix")
    def __pow__(self, other) -> Matrix:
        if not isinstance(other, int):
raise TypeError('''A Matrix can only be raised to the power of an int''' )
if not self.is_square:
raise ValueError('''Only square matrices can be raised to a power''' )
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
'''Only invertable matrices can be raised to a negative power''' )
        result = self
for _ in range(other - 1 ):
result *= self
return result
@classmethod
    def dot_product(cls, row, column) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))
if __name__ == "__main__":
import doctest
doctest.testmod()
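# Example (sketch):
#     m = Matrix([[1, 2], [3, 4]])
#     m.determinant()    # -2
#     m.is_invertable()  # True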
| 361 |
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime
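# Example: prime_sieve(10) == [2, 3, 5, 7]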
if __name__ == "__main__":
print(prime_sieve(int(input("""Enter a positive integer: """).strip())))
| 65 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_megatron_bert': ['MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegatronBertConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_megatron_bert"] = [
'MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegatronBertForCausalLM',
'MegatronBertForMaskedLM',
'MegatronBertForMultipleChoice',
'MegatronBertForNextSentencePrediction',
'MegatronBertForPreTraining',
'MegatronBertForQuestionAnswering',
'MegatronBertForSequenceClassification',
'MegatronBertForTokenClassification',
'MegatronBertModel',
'MegatronBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 76 |
def solution(n: int = 4000000) -> int:
    """Sum the even-valued Fibonacci terms not exceeding n."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
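# Example: solution(10) == 10 (the even Fibonacci numbers up to 10 are 2 and 8)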
if __name__ == "__main__":
print(F'''{solution() = }''')
| 337 | 0 |
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    # Split the dataset bunch into features and target
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    # Load California house price dataset
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1)
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 67 |
'''simple docstring'''
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path: str, gpt2_config_file: str, pytorch_dump_folder_path: str) -> None:
    # Construct the model config
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--gpt2_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--gpt2_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
| 67 | 1 |
"""simple docstring"""
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
'''simple docstring'''
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if inductance < 0:
raise ValueError("Inductance cannot be negative" )
if frequency < 0:
raise ValueError("Frequency cannot be negative" )
if reactance < 0:
raise ValueError("Inductive reactance cannot be negative" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 108 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
super().setUp()
# fmt: off
lowerCAmelCase_ = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
lowerCAmelCase_ = dict(zip(_UpperCamelCase , range(len(_UpperCamelCase ) ) ) )
lowerCAmelCase_ = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
lowerCAmelCase_ = {"unk_token": "<unk>"}
lowerCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowerCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(_UpperCamelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(_UpperCamelCase ) )
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
@require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)
# Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
"\u0009", # (horizontal tab, '\t')
"\u000B", # (vertical tab)
"\u000C", # (form feed)
"\u0020", # (space, ' ')
"\u200E", # (left-to-right mark):w
"\u200F", # (right-to-left mark)
]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
# Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
"\u000A", # (line feed, '\n')
"\r\n", # (carriage return and line feed, '\r\n')
"\u000D", # (carriage return, '\r')
"\r", # (carriage return, '\r')
"\u000D", # (carriage return, '\r')
"\u2028", # (line separator)
"\u2029", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
    def test_log_warning(self):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error in raised when the user try to load a previous version of the tokenizer.
        with self.assertRaises(ValueError) as context:
self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" )
self.assertTrue(
context.exception.args[0].startswith(
"The `backend_tokenizer` provided does not match the expected format." ) )
    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
| 231 | 0 |
def find_min(arr: list[int]) -> int:
    # Minimum partition difference via a subset-sum DP table
    n = len(arr)
    s = sum(arr)
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]

    for i in range(1, n + 1):
        dp[i][0] = True

    for i in range(1, s + 1):
        dp[0][i] = False

    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i][j - 1]

            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]

    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break

    return diff | 357 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
    from transformers import MobileNetV1ImageProcessor
class MobileNetV1ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
    def prepare_image_processor_dict(self):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class MobileNetV1ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileNetV1ImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetV1ImageProcessingTester(self)
@property
    def image_processor_dict(self):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_batch_feature(self):
'''simple docstring'''
pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        ) | 317 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
"XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLNetForMultipleChoice",
"XLNetForQuestionAnswering",
"XLNetForQuestionAnsweringSimple",
"XLNetForSequenceClassification",
"XLNetForTokenClassification",
"XLNetLMHeadModel",
"XLNetModel",
"XLNetPreTrainedModel",
"load_tf_weights_in_xlnet",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
"TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLNetForMultipleChoice",
"TFXLNetForQuestionAnsweringSimple",
"TFXLNetForSequenceClassification",
"TFXLNetForTokenClassification",
"TFXLNetLMHeadModel",
"TFXLNetMainLayer",
"TFXLNetModel",
"TFXLNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 46 |
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel
logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
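# Example: entropy(torch.tensor([[0.5, 0.5]])) ≈ tensor([0.6931]),
# i.e. ln(2) for a uniform two-way distribution.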
def print_ad_tensor(tensor):
    """Print a 2D tensor, one layer per line."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_ad_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_ad_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_ad_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_ad_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_ad_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    # Try pruning and test time speedup
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )

    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def _A ( ) -> int:
_lowercase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--data_dir" , default=snake_case , type=snake_case , required=snake_case , help="The input data dir. Should contain the .tsv files (or other data files) for the task." , )
parser.add_argument(
"--model_name_or_path" , default=snake_case , type=snake_case , required=snake_case , help="Path to pretrained model or model identifier from huggingface.co/models" , )
parser.add_argument(
"--output_dir" , default=snake_case , type=snake_case , required=snake_case , help="The output directory where the model predictions and checkpoints will be written." , )
# Other parameters
parser.add_argument(
"--config_name" , default="" , type=snake_case , help="Pretrained config name or path if not the same as model_name_or_path" , )
parser.add_argument(
"--tokenizer_name" , default="" , type=snake_case , help="Pretrained tokenizer name or path if not the same as model_name_or_path" , )
parser.add_argument(
"--cache_dir" , default=snake_case , type=snake_case , help="Where do you want to store the pre-trained models downloaded from s3" , )
parser.add_argument(
"--data_subset" , type=snake_case , default=-1 , help="If > 0: limit the data to a subset of data_subset instances." )
parser.add_argument(
"--overwrite_output_dir" , action="store_true" , help="Whether to overwrite data in output directory" )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
parser.add_argument(
"--dont_normalize_importance_by_layer" , action="store_true" , help="Don't normalize importance score by layers" )
parser.add_argument(
"--dont_normalize_global_importance" , action="store_true" , help="Don't normalize all importance scores between 0 and 1" , )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try masking heads until a threshold of accuracy is reached."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="Masking threshold in terms of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount of heads to mask at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")

    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and 0.0 < args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
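
# Example invocation (script and path names are illustrative, not fixed; the flags
# themselves are defined by the parser above):
#   python run_prune_gpt.py --model_name_or_path gpt2 --data_dir data/token_ids.txt \
#       --output_dir output/ --try_masking --masking_threshold 0.9 --masking_amount 0.1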
if __name__ == "__main__":
main()
| 250 | 0 |
def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(key), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    # Reverse our cipher mappings
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
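
# Minimal usage sketch (keyword and message are illustrative): because the
# substitution map built above is a bijection on A-Z, deciphering an enciphered
# message recovers the upper-cased original.
def _demo_round_trip() -> None:
    cipher_map = create_cipher_map("Goodbye")
    secret = encipher("Hello World", cipher_map)  # -> "CYJJM VMQJB"
    assert decipher(secret, cipher_map) == "HELLO WORLD"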
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 39 |
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
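
# Behavior pinned down by the tests above: for every component weight shipped as a
# PyTorch ``.bin`` file, a matching ``.safetensors`` file must exist for the repo to
# count as safetensors-compatible; when a ``variant`` (e.g. "fp16") is requested,
# non-variant safetensors files still satisfy the check. Illustrative call:
#   is_safetensors_compatible(
#       ["unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors"]
#   )  # -> True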
| 39 | 1 |
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBart50Tokenizer, MBart50TokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBart50TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBart50Tokenizer
    rust_tokenizer_class = MBart50TokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1054)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1054)
    def test_full_tokenizer(self):
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."])
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."])
@slow
def UpperCamelCase ( self ) -> List[Any]:
'''simple docstring'''
A__ = {"input_ids": [[250004, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [250004, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250004, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase , model_name="facebook/mbart-large-50" , revision="d3913889c59cd5c9e456b269c376325eabad57e2" , )
def UpperCamelCase ( self ) -> str:
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
A__ = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
A__ = self.rust_tokenizer_class.from_pretrained(lowercase , **lowercase )
A__ = self.tokenizer_class.from_pretrained(lowercase , **lowercase )
A__ = tempfile.mkdtemp()
A__ = tokenizer_r.save_pretrained(lowercase )
A__ = tokenizer_p.save_pretrained(lowercase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
A__ = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
self.assertSequenceEqual(lowercase , lowercase )
# Checks everything loads correctly in the same way
A__ = tokenizer_r.from_pretrained(lowercase )
A__ = tokenizer_p.from_pretrained(lowercase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase , lowercase ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowercase )
# Save tokenizer rust, legacy_format=True
A__ = tempfile.mkdtemp()
A__ = tokenizer_r.save_pretrained(lowercase , legacy_format=lowercase )
A__ = tokenizer_p.save_pretrained(lowercase )
# Checks it save with the same files
self.assertSequenceEqual(lowercase , lowercase )
# Checks everything loads correctly in the same way
A__ = tokenizer_r.from_pretrained(lowercase )
A__ = tokenizer_p.from_pretrained(lowercase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase , lowercase ) )
shutil.rmtree(lowercase )
# Save tokenizer rust, legacy_format=False
A__ = tempfile.mkdtemp()
A__ = tokenizer_r.save_pretrained(lowercase , legacy_format=lowercase )
A__ = tokenizer_p.save_pretrained(lowercase )
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
A__ = tokenizer_r.from_pretrained(lowercase )
A__ = tokenizer_p.from_pretrained(lowercase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase , lowercase ) )
shutil.rmtree(lowercase )
@require_torch
@require_sentencepiece
@require_tokenizers
class MBart50OneToManyIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-50-one-to-many-mmt"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer = MBart50Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
def UpperCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"] , 250001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"] , 250004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"] , 250020 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"] , 250038 )
def UpperCamelCase ( self ) -> Any:
'''simple docstring'''
A__ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowercase )
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
self.assertIn(lowercase , self.tokenizer.all_special_ids )
A__ = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
A__ = self.tokenizer.decode(lowercase , skip_special_tokens=lowercase )
A__ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowercase )
self.assertEqual(lowercase , lowercase )
self.assertNotIn(self.tokenizer.eos_token , lowercase )
def UpperCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
A__ = ["this is gunna be a long sentence " * 20]
assert isinstance(src_text[0] , lowercase )
A__ = 10
A__ = self.tokenizer(lowercase , max_length=lowercase , truncation=lowercase ).input_ids[0]
self.assertEqual(ids[0] , lowercase )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(lowercase ) , lowercase )
def UpperCamelCase ( self ) -> Tuple:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ) , [250053, 250001] )
    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBart50Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
@require_torch
def UpperCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
A__ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowercase , return_tensors="pt" )
A__ = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def UpperCamelCase ( self ) -> str:
'''simple docstring'''
A__ = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowercase , truncation=lowercase , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
A__ = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
self.assertIsInstance(lowercase , lowercase )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
A__ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowercase )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def UpperCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
A__ = self.tokenizer(self.src_text , padding=lowercase , truncation=lowercase , max_length=3 , return_tensors="pt" )
A__ = self.tokenizer(
text_target=self.tgt_text , padding=lowercase , truncation=lowercase , max_length=10 , return_tensors="pt" )
A__ = targets["input_ids"]
A__ = shift_tokens_right(lowercase , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def UpperCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
A__ = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="ar_AR" )
self.assertEqual(
nested_simplify(lowercase ) , {
# en_XX, A, test, EOS
"input_ids": [[250004, 62, 3034, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 250001,
} , )
| 68 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImg2ImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gradient_checkpointing = False
@property
    def text_embedder_hidden_size(self):
"""simple docstring"""
return 32
@property
    def time_input_dim(self):
"""simple docstring"""
return 32
@property
    def time_embed_dim(self):
"""simple docstring"""
return self.time_input_dim * 4
@property
    def renderer_dim(self):
"""simple docstring"""
return 8
@property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=64,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=1,
        )

        model = CLIPVisionModel(config)
        return model
@property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )

        return image_processor
@property
    def dummy_prior(self):
        torch.manual_seed(0)

        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }

        model = PriorTransformer(**model_kwargs)
        return model
@property
    def dummy_renderer(self):
        torch.manual_seed(0)

        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        # batch sizes are kept small to keep this dummy-component test fast
        self._test_inference_batch_consistent(batch_sizes=[1, 2])
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True

        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )
    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2

        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy"
        )
        pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            input_image,
            generator=generator,
            guidance_scale=3.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
| 305 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
        "facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on


class MBartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting. No prefix and suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
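
# Usage sketch (checkpoint name taken from the pretrained map above; kwargs are
# illustrative): source text is post-processed as ``tokens + [eos, src_lang_code]``
# and translation generation is forced to start with the target language code via
# ``forced_bos_token_id``.
#   tokenizer = MBartTokenizerFast.from_pretrained(
#       "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
#   )
#   batch = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")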
| 247 |
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
| 247 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''squeezebert/squeezebert-uncased''': (
'''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'''
),
'''squeezebert/squeezebert-mnli''': '''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt''',
'''squeezebert/squeezebert-mnli-headless''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''squeezebert/squeezebert-uncased''': (
'''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'''
),
'''squeezebert/squeezebert-mnli''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'''
),
'''squeezebert/squeezebert-mnli-headless''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''squeezebert/squeezebert-uncased''': 5_1_2,
'''squeezebert/squeezebert-mnli''': 5_1_2,
'''squeezebert/squeezebert-mnli-headless''': 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
'''squeezebert/squeezebert-uncased''': {'''do_lower_case''': True},
'''squeezebert/squeezebert-mnli''': {'''do_lower_case''': True},
'''squeezebert/squeezebert-mnli-headless''': {'''do_lower_case''': True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 109 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_distilbert''': [
'''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''DistilBertConfig''',
'''DistilBertOnnxConfig''',
],
'''tokenization_distilbert''': ['''DistilBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
'''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DistilBertForMaskedLM''',
'''DistilBertForMultipleChoice''',
'''DistilBertForQuestionAnswering''',
'''DistilBertForSequenceClassification''',
'''DistilBertForTokenClassification''',
'''DistilBertModel''',
'''DistilBertPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
'''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDistilBertForMaskedLM''',
'''TFDistilBertForMultipleChoice''',
'''TFDistilBertForQuestionAnswering''',
'''TFDistilBertForSequenceClassification''',
'''TFDistilBertForTokenClassification''',
'''TFDistilBertMainLayer''',
'''TFDistilBertModel''',
'''TFDistilBertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
'''FlaxDistilBertForMaskedLM''',
'''FlaxDistilBertForMultipleChoice''',
'''FlaxDistilBertForQuestionAnswering''',
'''FlaxDistilBertForSequenceClassification''',
'''FlaxDistilBertForTokenClassification''',
'''FlaxDistilBertModel''',
'''FlaxDistilBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
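
# ``_LazyModule`` defers the heavy framework imports: the torch/TF/flax modules
# listed in ``_import_structure`` are only imported when one of their attributes
# (e.g. ``DistilBertModel``) is first accessed, keeping the top-level import cheap.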
| 264 | 0 |
def sum_of_digits(n: int) -> int:
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Benchmark the three implementations with ints of three different lengths."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262_144, 1_125_899_906_842_624, 1_267_650_600_228_229_401_496_703_205_376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
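
# Quick agreement sketch (values chosen arbitrarily): all three implementations
# should produce identical digit sums, including for zero and negative inputs.
def _check_agreement() -> None:
    for n in (0, 7, -17, 123_456_789):
        assert sum_of_digits(n) == sum_of_digits_recursion(n) == sum_of_digits_compact(n)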
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 110 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
    AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    return image
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blip2_config(model_name, eos_token_id):
    image_size = 364 if "coco" in model_name else 224
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = Blip2Config(vision_config=vision_config, text_config=text_config)

    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """
    Copy/paste/tweak the original model's weights to the Transformers design.
    """
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blip2_config(model_name, eos_token_id=eos_token_id)

    hf_model = Blip2ForConditionalGeneration(config).eval()

    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }

    name, model_type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=model_type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values, original_pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")

    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)

    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
"blip2-opt-2.7b",
"blip2-opt-6.7b",
"blip2-opt-2.7b-coco",
"blip2-opt-6.7b-coco",
"blip2-flan-t5-xl",
"blip2-flan-t5-xl-coco",
"blip2-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="blip2-opt-2.7b",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
    args = parser.parse_args()
    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 110 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class a ( unittest.TestCase ):
"""simple docstring"""
def __init__( self: Optional[Any] , UpperCamelCase: Optional[int] , UpperCamelCase: Optional[Any]=13 , UpperCamelCase: List[Any]=7 , UpperCamelCase: List[Any]=True , UpperCamelCase: Optional[Any]=True , UpperCamelCase: List[str]=True , UpperCamelCase: Optional[int]=True , UpperCamelCase: List[Any]=99 , UpperCamelCase: List[str]=32 , UpperCamelCase: List[Any]=5 , UpperCamelCase: Dict=4 , UpperCamelCase: int=37 , UpperCamelCase: Optional[int]="gelu" , UpperCamelCase: List[Any]=0.1 , UpperCamelCase: Optional[int]=0.1 , UpperCamelCase: Optional[int]=5_12 , UpperCamelCase: Union[str, Any]=16 , UpperCamelCase: Optional[int]=2 , UpperCamelCase: List[Any]=0.02 , UpperCamelCase: Tuple=4 , ):
"""simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self ):
"""simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
    def prepare_config_and_inputs_for_decoder( self ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
"""simple docstring"""
    test_head_masking = True
    all_model_classes = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp( self ):
"""simple docstring"""
        self.model_tester = FlaxRobertaModelTester(self )
@slow
    def test_model_from_pretrained( self ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("""roberta-base""", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
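# To run only the slow checkpoint-loading test above (a sketch; assumes the
# standard transformers test layout and that RUN_SLOW=1 enables @slow tests):
#   RUN_SLOW=1 python -m pytest tests/models/roberta/test_modeling_flax_roberta.py -k test_model_from_pretrained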
| 335 |
"""simple docstring"""
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version('torch'))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(F"""`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}""")
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str):
    return compare_versions(torch_version, operation, version)
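# Minimal usage sketch of the helpers above:
#   if is_torch_version(">=", "1.12.0"):
#       ...  # safely rely on features introduced in torch 1.12
#   compare_versions("numpy", "<", "2.0.0")  # works for any installed package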
| 335 | 1 |
"""simple docstring"""
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
('time_embed.0.weight', 'time_embedding.linear_1.weight'),
('time_embed.0.bias', 'time_embedding.linear_1.bias'),
('time_embed.2.weight', 'time_embedding.linear_2.weight'),
('time_embed.2.bias', 'time_embedding.linear_2.bias'),
('input_blocks.0.0.weight', 'conv_in.weight'),
('input_blocks.0.0.bias', 'conv_in.bias'),
('out.0.weight', 'conv_norm_out.weight'),
('out.0.bias', 'conv_norm_out.bias'),
('out.2.weight', 'conv_out.weight'),
('out.2.bias', 'conv_out.bias'),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
('in_layers.0', 'norm1'),
('in_layers.2', 'conv1'),
('out_layers.0', 'norm2'),
('out_layers.3', 'conv2'),
('emb_layers.1', 'time_emb_proj'),
('skip_connection', 'conv_shortcut'),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks

    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
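# A few (stable-diffusion, HF Diffusers) pairs the loops above generate, for
# illustration:
#   ("input_blocks.1.0.", "down_blocks.0.resnets.0.")
#   ("output_blocks.0.0.", "up_blocks.0.resnets.0.")
#   ("middle_block.1.", "mid_block.attentions.0.")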
def convert_unet_state_dict(unet_state_dict):
    # buyer beware: this is a *brittle* function,
    # and correct output requires that all of these pieces interact in
    # the exact order in which I have arranged them.
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
# (stable-diffusion, HF Diffusers)
('nin_shortcut', 'conv_shortcut'),
('norm_out', 'conv_norm_out'),
('mid.attn_1.', 'mid_block.attentions.0.'),
]
for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
# (stable-diffusion, HF Diffusers)
('norm.', 'group_norm.'),
('q.', 'query.'),
('k.', 'key.'),
('v.', 'value.'),
('proj_out.', 'proj_attn.'),
]
def reshape_weight_for_sd(w):
    # convert HF linear weights to SD conv2d weights
    return w.reshape(*w.shape, 1, 1)
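# Example: a [320, 320] nn.Linear weight becomes a [320, 320, 1, 1] tensor,
# i.e. the equivalent 1x1 conv kernel expected by the original SD attention blocks.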
def convert_vae_state_dict(vae_state_dict):
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
('resblocks.', 'text_model.encoder.layers.'),
('ln_1', 'layer_norm1'),
('ln_2', 'layer_norm2'),
('.c_fc.', '.fc1.'),
('.c_proj.', '.fc2.'),
('.attn', '.self_attn'),
('ln_final.', 'transformer.text_model.final_layer_norm.'),
('token_embedding.weight', 'transformer.text_model.embeddings.token_embedding.weight'),
('positional_embedding', 'transformer.text_model.embeddings.position_embedding.weight'),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile('|'.join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {'q': 0, 'k': 1, 'v': 2}
def convert_text_enc_state_dict_v20(text_enc_dict):
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith('''.self_attn.q_proj.weight''')
            or k.endswith('''.self_attn.k_proj.weight''')
            or k.endswith('''.self_attn.v_proj.weight''')
        ):
            k_pre = k[: -len('''.q_proj.weight''')]
            k_code = k[-len('''q_proj.weight''')]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith('''.self_attn.q_proj.bias''')
            or k.endswith('''.self_attn.k_proj.bias''')
            or k.endswith('''.self_attn.v_proj.bias''')
        ):
            k_pre = k[: -len('''.q_proj.bias''')]
            k_code = k[-len('''q_proj.bias''')]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception('''CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing''')
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + '''.in_proj_weight'''] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception('''CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing''')
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + '''.in_proj_bias'''] = torch.cat(tensors)

    return new_state_dict
def convert_text_enc_state_dict(text_enc_dict):
    return text_enc_dict
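# For SD v1.x checkpoints the CLIP text-encoder keys already line up, so the
# function above is deliberately an identity mapping; only v2.x (OpenCLIP)
# checkpoints need the q/k/v fusion done in convert_text_enc_state_dict_v20.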
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--use_safetensors', action='store_true', help='Save weights use safetensors, default is ckpt.'
)
    args = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
    unet_path = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.safetensors')
    vae_path = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.safetensors')
    text_enc_path = osp.join(args.model_path, 'text_encoder', 'model.safetensors')
    # Load models from safetensors if they exist, otherwise fall back to the PyTorch .bin checkpoints
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device='cpu')
    else:
        unet_path = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.bin')
        unet_state_dict = torch.load(unet_path, map_location='cpu')

    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device='cpu')
    else:
        vae_path = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.bin')
        vae_state_dict = torch.load(vae_path, map_location='cpu')

    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device='cpu')
    else:
        text_enc_path = osp.join(args.model_path, 'text_encoder', 'pytorch_model.bin')
        text_enc_dict = torch.load(text_enc_path, map_location='cpu')
# Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {'model.diffusion_model.' + k: v for k, v in unet_state_dict.items()}

    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {'first_stage_model.' + k: v for k, v in vae_state_dict.items()}
    # Easiest way to identify a v2.0 model seems to be that its text encoder (OpenCLIP) is deeper
    is_v20_model = 'text_model.encoder.layers.22.layer_norm2.bias' in text_enc_dict

    if is_v20_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {'transformer.' + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict)
        text_enc_dict = {'cond_stage_model.model.' + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {'cond_stage_model.transformer.' + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
        state_dict = {'state_dict': state_dict}
torch.save(state_dict, args.checkpoint_path)
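# Example invocation (an illustrative sketch; paths are hypothetical):
#   python convert_diffusers_to_original_stable_diffusion.py \
#       --model_path ./my-diffusers-model --checkpoint_path ./model.safetensors \
#       --half --use_safetensors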
| 302 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
    use_conv_embed = True if '''large''' in model_name or '''huge''' in model_name else False
    use_post_layernorm = True if '''large''' in model_name or '''huge''' in model_name else False
    use_layerscale = True if '''large''' in model_name or '''huge''' in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = '''huggingface/label-files'''
    if "large" in model_name or "huge" in model_name:
        filename = '''imagenet-22k-id2label.json'''
    else:
        filename = '''imagenet-1k-id2label.json'''

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset'''), '''r'''))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim, depths=depths, focal_levels=focal_levels, focal_windows=focal_windows, use_conv_embed=use_conv_embed, id2label=id2label, label2id=label2id, use_post_layernorm=use_post_layernorm, use_layerscale=use_layerscale, )

    return config
def rename_key(name):
if "patch_embed.proj" in name:
lowercase__ : Any = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
lowercase__ : Tuple = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if "layers" in name:
lowercase__ : Dict = '''encoder.''' + name
if "encoder.layers" in name:
lowercase__ : Tuple = name.replace('''encoder.layers''' , '''encoder.stages''' )
if "downsample.proj" in name:
lowercase__ : Union[str, Any] = name.replace('''downsample.proj''' , '''downsample.projection''' )
if "blocks" in name:
lowercase__ : Optional[Any] = name.replace('''blocks''' , '''layers''' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
lowercase__ : Dict = name.replace('''modulation.f''' , '''modulation.projection_in''' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
lowercase__ : Dict = name.replace('''modulation.h''' , '''modulation.projection_context''' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
lowercase__ : Optional[Any] = name.replace('''modulation.proj''' , '''modulation.projection_out''' )
if name == "norm.weight":
lowercase__ : Dict = '''layernorm.weight'''
if name == "norm.bias":
lowercase__ : Dict = '''layernorm.bias'''
if "head" in name:
lowercase__ : Dict = name.replace('''head''' , '''classifier''' )
else:
lowercase__ : List[Any] = '''focalnet.''' + name
return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print('''Checkpoint URL: ''', checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='''cpu''')['''model''']

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    processor = BitImageProcessor(
        do_resize=True, size={'''shortest_edge''': 256}, resample=PILImageResampling.BILINEAR, do_center_crop=True, crop_size=224, do_normalize=True, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD, )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors='''pt''')

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ] )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)

    predicted_class_idx = outputs.logits.argmax(-1).item()
    print('''Predicted class:''', model.config.id2label[predicted_class_idx])
print('''First values of logits:''' , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
lowercase__ : Dict = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] )
elif model_name == "focalnet-tiny-lrf":
lowercase__ : Union[str, Any] = torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] )
elif model_name == "focalnet-small":
lowercase__ : Optional[int] = torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] )
elif model_name == "focalnet-small-lrf":
lowercase__ : Dict = torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] )
elif model_name == "focalnet-base":
lowercase__ : List[str] = torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] )
elif model_name == "focalnet-base-lrf":
lowercase__ : List[str] = torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] )
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        print(f"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"""Pushing model and processor of {model_name} to the hub...""")
        model.push_to_hub(f"""{model_name}""")
        processor.push_to_hub(f"""{model_name}""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
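# Example invocation (an illustrative sketch; the script file name is hypothetical):
#   python convert_focalnet_to_hf_format.py --model_name focalnet-tiny \
#       --pytorch_dump_folder_path ./focalnet-tiny --push_to_hub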
| 302 | 1 |
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self) -> None:
        '''simple docstring'''
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
            tokenizer.sep_token_id
        ]
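# The assertions above encode the BERT/DistilBERT special-token layout, i.e.:
#   single sequence: [CLS] tokens... [SEP]
#   sequence pair:   [CLS] tokens_a... [SEP] tokens_b... [SEP]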
| 234 |
'''simple docstring'''
from __future__ import annotations
__author__ = 'Muhammad Umer Farooq'
__license__ = 'MIT'
__version__ = '1.0.0'
__maintainer__ = 'Muhammad Umer Farooq'
__email__ = '[email protected]'
__status__ = 'Alpha'
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        '''simple docstring'''
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        '''simple docstring'''
        # Only parse 'anchor' tags.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined and is neither empty nor "#", process it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)
def get_domain_name(url: str) -> str:
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


def get_sub_domain_name(url: str) -> str:
    return parse.urlparse(url).netloc


def emails_from_url(url: str = "https://github.com") -> list[str]:
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(domain)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)
if __name__ == "__main__":
    emails = emails_from_url('https://github.com')
print(F'''{len(emails)} emails found:''')
print('\n'.join(sorted(emails)))
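# Usage for another site is the same one-liner (a sketch; the domain is
# hypothetical and each discovered link costs one HTTP request):
#   print(emails_from_url("https://example.com"))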
| 234 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/xglm-564M""": """https://huggingface.co/facebook/xglm-564M/resolve/main/config.json""",
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class XGLMConfig(PretrainedConfig):
_snake_case : Union[str, Any] = """xglm"""
_snake_case : str = ["""past_key_values"""]
_snake_case : List[str] = {
"""num_attention_heads""": """attention_heads""",
"""hidden_size""": """d_model""",
"""num_hidden_layers""": """num_layers""",
}
    def __init__( self, vocab_size=256008, max_position_embeddings=2048, d_model=1024, ffn_dim=4096, num_layers=24, attention_heads=16, activation_function="gelu", dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, layerdrop=0.0, init_std=0.02, scale_embedding=True, use_cache=True, decoder_start_token_id=2, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs, ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.ffn_dim = ffn_dim
        self.num_layers = num_layers
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.init_std = init_std
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs, )
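# Minimal usage sketch: the defaults above yield a configuration in the style
# of facebook/xglm-564M, so a randomly initialised model of that size is just:
#   from transformers import XGLMConfig, XGLMModel
#   config = XGLMConfig()
#   model = XGLMModel(config)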
| 268 |
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(F"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 268 | 1 |
"""simple docstring"""
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    """simple docstring"""
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
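# Worked example: values [60, 100, 120], weights [10, 20, 30], capacity w=50,
# n=3 items. The value/weight ratio ordering keeps items 1 and 2 whole
# (value 160) plus 20/30 of item 3 (value 80):
#   frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3)  # -> 240.0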
| 241 |
"""simple docstring"""
from collections.abc import Iterable
from typing import Generic, TypeVar
lowercase__ = TypeVar("""_T""")
class QueueByTwoStacks(Generic[_T]):
    '''simple docstring'''

    def __init__(self, iterable: Iterable[_T] | None = None) -> None:
        self._stack1: list[_T] = list(iterable or [])
        self._stack2: list[_T] = []

    def __len__(self) -> int:
        return len(self._stack1) + len(self._stack2)

    def __repr__(self) -> str:
        return f'Queue({tuple(self._stack2[::-1] + self._stack1)})'

    def put(self, item: _T) -> None:
        self._stack1.append(item)

    def get(self) -> _T:
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append
        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop())

        if not self._stack2:
            raise IndexError("Queue is empty")
        return self._stack2.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
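# Usage example: FIFO behaviour with amortised O(1) get(), since each element
# is moved from _stack1 to _stack2 at most once.
#   q = QueueByTwoStacks([1, 2, 3])
#   q.put(4)
#   q.get()  # -> 1
#   len(q)   # -> 3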
| 241 | 1 |
"""simple docstring"""
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)
AUTO = tf.data.AUTOTUNE
def parse_args():
    """simple docstring"""
    parser = argparse.ArgumentParser(description="""Train a masked language model on TPU.""")
    parser.add_argument(
        """--pretrained_model_config""", type=str, default="""roberta-base""", help="""The model config to use. Note that we don't copy the model's weights, only the config!""", )
    parser.add_argument(
        """--tokenizer""", type=str, default="""unigram-tokenizer-wikitext""", help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""", )

    parser.add_argument(
        """--per_replica_batch_size""", type=int, default=8, help="""Batch size per TPU core.""", )

    parser.add_argument(
        """--no_tpu""", action="""store_true""", help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""", )

    parser.add_argument(
        """--tpu_name""", type=str, help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""", default="""local""", )

    parser.add_argument(
        """--tpu_zone""", type=str, help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""", )

    parser.add_argument(
        """--gcp_project""", type=str, help="""Google cloud project name. Only used for non-Colab TPU nodes.""")

    parser.add_argument(
        """--bfloat16""", action="""store_true""", help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""", )

    parser.add_argument(
        """--train_dataset""", type=str, help="""Path to training dataset to load. If the path begins with `gs://`"""
        """ then the dataset will be loaded from a Google Cloud Storage bucket.""", )

    parser.add_argument(
        """--shuffle_buffer_size""", type=int, default=2**18, help="""Size of the shuffle buffer (in samples)""", )

    parser.add_argument(
        """--eval_dataset""", type=str, help="""Path to evaluation dataset to load. If the path begins with `gs://`"""
        """ then the dataset will be loaded from a Google Cloud Storage bucket.""", )

    parser.add_argument(
        """--num_epochs""", type=int, default=1, help="""Number of epochs to train for.""", )

    parser.add_argument(
        """--learning_rate""", type=float, default=1e-4, help="""Learning rate to use for training.""", )

    parser.add_argument(
        """--weight_decay_rate""", type=float, default=1e-3, help="""Weight decay rate to use for training.""", )

    parser.add_argument(
        """--max_length""", type=int, default=512, help="""Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py""", )

    parser.add_argument(
        """--mlm_probability""", type=float, default=0.15, help="""Fraction of tokens to mask during training.""", )

    parser.add_argument("""--output_dir""", type=str, required=True, help="""Path to save model checkpoints to.""")
    parser.add_argument("""--hub_model_id""", type=str, help="""Model ID to upload to on the Hugging Face Hub.""")

    args = parser.parse_args()
    return args
def initialize_tpu(args):
    """simple docstring"""
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            """Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """
            """--gcp_project. When running on a TPU VM, use --tpu_name local.""" )

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)

    return tpu
def count_samples(file_list):
    """simple docstring"""
    num_samples = 0
    for file in file_list:
        filename = file.split("""/""")[-1]
        sample_count = re.search(R"""-\d+-(\d+)\.tfrecord""", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count

    return num_samples
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    """simple docstring"""
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
return dataset
def main(args):
    """simple docstring"""
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="""/gpu:0""")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, """*.tfrecord"""))
    if not training_records:
        raise ValueError(F'''No .tfrecord files found in {args.train_dataset}.''')
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, """*.tfrecord"""))
    if not eval_records:
        raise ValueError(F'''No .tfrecord files found in {args.eval_dataset}.''')

    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps, num_warmup_steps=total_train_steps // 20, init_lr=args.learning_rate, weight_decay_rate=args.weight_decay_rate, )

        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["""accuracy"""])

    def decode_fn(example):
        features = {
            """input_ids""": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            """attention_mask""": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="""tf""")

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["""attention_mask"""], tf.bool)
            | (batch["""input_ids"""] == tokenizer.cls_token_id)
            | (batch["""input_ids"""] == tokenizer.sep_token_id)
        )
        batch["""input_ids"""], batch["""labels"""] = data_collator.tf_mask_tokens(
            batch["""input_ids"""], vocab_size=len(tokenizer), mask_token_id=tokenizer.mask_token_id, special_tokens_mask=special_tokens_mask, )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync
    train_dataset = prepare_dataset(
        training_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=True, shuffle_buffer_size=args.shuffle_buffer_size, )
    eval_dataset = prepare_dataset(
        eval_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=False, )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer))

    model.fit(
        train_dataset, validation_data=eval_dataset, epochs=args.num_epochs, callbacks=callbacks, )

    model.save_pretrained(args.output_dir)
if __name__ == "__main__":
    args = parse_args()
main(args)
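# Example invocation on a TPU VM (an illustrative sketch; bucket paths are
# hypothetical):
#   python run_mlm.py --tpu_name local --bfloat16 \
#       --train_dataset gs://my-bucket/train --eval_dataset gs://my-bucket/eval \
#       --output_dir gs://my-bucket/output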
| 350 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"""configuration_mbart""": ["""MBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MBartConfig""", """MBartOnnxConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_mbart"""] = ["""MBartTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_mbart_fast"""] = ["""MBartTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_mbart"""] = [
"""MBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MBartForCausalLM""",
"""MBartForConditionalGeneration""",
"""MBartForQuestionAnswering""",
"""MBartForSequenceClassification""",
"""MBartModel""",
"""MBartPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_mbart"""] = [
"""TFMBartForConditionalGeneration""",
"""TFMBartModel""",
"""TFMBartPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_mbart"""] = [
"""FlaxMBartForConditionalGeneration""",
"""FlaxMBartForQuestionAnswering""",
"""FlaxMBartForSequenceClassification""",
"""FlaxMBartModel""",
"""FlaxMBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
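# With the _LazyModule indirection, user code is unchanged but submodules are
# only imported on first attribute access, e.g.:
#   from transformers.models.mbart import MBartConfig  # triggers the real import here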
| 234 | 0 |
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_lowerCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase )
_lowerCamelCase : Tuple = -1
_lowerCamelCase : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase )
_lowerCamelCase : List[Any] = model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
_lowerCamelCase : Union[str, Any] = TextStreamer(__lowerCAmelCase )
model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase , streamer=__lowerCAmelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_lowerCamelCase : int = cs.out[:-1]
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_lowerCamelCase : Optional[int] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase )
_lowerCamelCase : Tuple = -1
_lowerCamelCase : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase )
_lowerCamelCase : List[str] = tokenizer.decode(greedy_ids[0] )
_lowerCamelCase : Tuple = TextIteratorStreamer(__lowerCAmelCase )
_lowerCamelCase : Tuple = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
_lowerCamelCase : List[Any] = Thread(target=model.generate , kwargs=__lowerCAmelCase )
thread.start()
_lowerCamelCase : int = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_lowerCamelCase : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase )
_lowerCamelCase : Tuple = -1
_lowerCamelCase : Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase )
_lowerCamelCase : int = model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = greedy_ids[:, input_ids.shape[1] :]
_lowerCamelCase : int = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
_lowerCamelCase : Any = TextStreamer(__lowerCAmelCase , skip_prompt=__lowerCAmelCase )
model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase , streamer=__lowerCAmelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_lowerCamelCase : Union[str, Any] = cs.out[:-1]
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained('''distilgpt2''' )
_lowerCamelCase : Optional[Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(__lowerCAmelCase )
_lowerCamelCase : str = -1
_lowerCamelCase : Any = torch.ones((1, 5) , device=__lowerCAmelCase ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
_lowerCamelCase : List[Any] = TextStreamer(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase )
model.generate(__lowerCAmelCase , max_new_tokens=1 , do_sample=__lowerCAmelCase , streamer=__lowerCAmelCase )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
_lowerCamelCase : Any = cs.out[:-1] # Remove the final "\n"
_lowerCamelCase : int = tokenizer(__lowerCAmelCase , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_lowerCamelCase : Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = -1
_lowerCamelCase : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase )
_lowerCamelCase : List[str] = TextIteratorStreamer(__lowerCAmelCase , timeout=0.0_01 )
_lowerCamelCase : str = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
_lowerCamelCase : List[Any] = Thread(target=model.generate , kwargs=__lowerCAmelCase )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(__lowerCAmelCase ):
_lowerCamelCase : Optional[Any] = ''''''
for new_text in streamer:
streamer_text += new_text
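# Typical application-side use of TextIteratorStreamer outside of tests
# (a sketch; mirrors the threading pattern exercised above):
#   streamer = TextIteratorStreamer(tokenizer)
#   Thread(target=model.generate, kwargs={"input_ids": input_ids, "streamer": streamer}).start()
#   for chunk in streamer:
#       print(chunk, end="", flush=True)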
| 72 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):
    model_type = "unispeech"
    # NOTE: parameter names below are reconstructed — they are inferred from the attribute
    # assignments in the body, and match the UniSpeech-style speech config this class mirrors.
    def __init__(
        self,
        vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1,
        attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1,
        layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05,
        mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10,
        mask_feature_min_masks=0, num_codevectors_per_group=320, num_codevector_groups=2,
        contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256,
        proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="mean",
        ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256,
        num_ctc_classes=80, pad_token_id=0, bos_token_id=1, eos_token_id=2, replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        # property name follows the analogous Wav2Vec2-style configs; `functools` and
        # `operator` are imported at the top of the full module (not shown in this excerpt)
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 72 | 1 |
'''simple docstring'''
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
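# NOTE (assumption): `get_duration` is a decorator from the local `utils` module that runs
# the wrapped function and returns its wall-clock time, and `generate_example_dataset`
# builds an Arrow-backed dataset with the requested features and number of examples.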
SPEED_TEST_N_EXAMPLES = 500_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def map(dataset, **kwargs):  # noqa: A001 — intentionally shadows the builtin, as in the upstream benchmark
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset, **kwargs):  # noqa: A001
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    # the timing-dict key labels below are illustrative; only the dict structure matters
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)
        times["map identity batched"] = map(dataset, batched=True)
        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)
        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)
        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 83 |
'''simple docstring'''
from collections.abc import Generator
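# Project Euler 25: find the index of the first Fibonacci term that contains n digits.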
def fibonacci_generator() -> Generator[int, None, None]:
    """Yield the Fibonacci numbers 1, 2, 3, 5, 8, ..."""
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci term to contain n digits."""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 83 | 1 |
"""simple docstring"""
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
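# The Möbius function μ(n) is 0 when n is divisible by a perfect square, and otherwise
# (-1)^k where k is the number of prime factors of n.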
def mobius(n: int) -> int:
    factors = prime_factors(n)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 84 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    # NOTE: class and test method names reconstructed from the diffusers test suite
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 84 | 1 |
from math import pow
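# Backtracking count of the ways to write `needed_sum` as a sum of distinct `power`-th
# powers of increasing integers. Note that `math.pow` returns a float, hence the int() cast.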
def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple[int, int]:
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solve(needed_sum: int, power: int) -> int:
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )
    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 36 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
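# If torch/transformers (>= 4.25.0) are missing, fall back to dummy placeholder objects so
# that importing this package still succeeds and a helpful error is raised only on use.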
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 36 | 1 |
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
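# Full determinism makes the hard-coded output slices asserted below reproducible across runs.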
class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
    # NOTE: class, attribute, and test names reconstructed from the diffusers PriorTransformer tests
    model_class = PriorTransformer
    main_input_name = "hidden_states"

    @property
    def dummy_input(self):
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def get_dummy_seed_input(self, seed=0):
        torch.manual_seed(seed)
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    @property
    def input_shape(self):
        return (4, 8)

    @property
    def output_shape(self):
        return (4, 8)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "num_attention_heads": 2,
            "attention_head_dim": 4,
            "num_layers": 2,
            "embedding_dim": 8,
            "num_embeddings": 7,
            "additional_embeddings": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = PriorTransformer.from_pretrained(
            "hf-internal-testing/prior-dummy", output_loading_info=True
        )
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        hidden_states = model(**self.dummy_input)[0]

        assert hidden_states is not None, "Make sure output is not None"

    def test_forward_signature(self):
        init_dict, _ = self.prepare_init_args_and_inputs_for_common()

        model = self.model_class(**init_dict)
        signature = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]

        expected_arg_names = ["hidden_states", "timestep"]
        self.assertListEqual(arg_names[:2], expected_arg_names)

    def test_output_pretrained(self):
        model = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy")
        model = model.to(torch_device)

        if hasattr(model, "set_default_attn_processor"):
            model.set_default_attn_processor()

        input = self.get_dummy_seed_input()

        with torch.no_grad():
            output = model(**input)[0]

        output_slice = output[0, :5].flatten().cpu()
        print(output_slice)

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239])
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class PriorTransformerIntegrationTests(unittest.TestCase):
    def get_dummy_seed_input(self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0):
        torch.manual_seed(seed)
        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def tearDown(self):
        # clean up GPU memory after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
            [37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
            # fmt: on
        ]
    )
    def test_kandinsky_prior(self, seed, expected_slice):
        model = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior")
        model.to(torch_device)
        input = self.get_dummy_seed_input(seed=seed)

        with torch.no_grad():
            sample = model(**input)[0]

        assert list(sample.shape) == [1, 768]

        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice)
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
| 306 |
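# Top-down merge sort: split the list in half, sort each half recursively, then merge
# the two sorted halves with a generator that always yields the smaller head element.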
def merge_sort(collection: list) -> list:
    """Pure Python implementation of the merge sort algorithm."""

    def merge(left: list, right: list) -> list:
        """Merge two sorted lists into a single sorted list."""

        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase = input('''Enter numbers separated by a comma:\n''').strip()
UpperCamelCase = [int(item) for item in user_input.split(''',''')]
print(*merge_sort(unsorted), sep=''',''')
| 306 | 1 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
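# `ANY(type)` is a loose matcher from the shared pipeline-test helpers: it compares equal to
# any value of the given type, so scores/labels/boxes only need to have the right shape.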
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)

        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                },
            )

        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")

        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)

        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    },
                )
@require_tf
@unittest.skip('Object detection not implemented in TF' )
    def test_small_model_tf(self):
        pass
@require_torch
    def test_small_model_pt(self):
        model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0)

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'score': 0.3_376, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}},
{'score': 0.3_376, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}},
] , )
        batch_outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ],
            threshold=0.0,
        )
        self.assertEqual(
            nested_simplify(batch_outputs, decimals=4),
            [
[
{'score': 0.3_376, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}},
{'score': 0.3_376, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}},
],
[
{'score': 0.3_376, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}},
{'score': 0.3_376, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}},
],
] , )
@require_torch
@slow
    def test_large_model_pt(self):
        model_id = "facebook/detr-resnet-50"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'score': 0.9_982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
{'score': 0.9_960, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
{'score': 0.9_955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
{'score': 0.9_988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9_987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
] , )
        batch_outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(batch_outputs, decimals=4),
            [
[
{'score': 0.9_982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
{'score': 0.9_960, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
{'score': 0.9_955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
{'score': 0.9_988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9_987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
],
[
{'score': 0.9_982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
{'score': 0.9_960, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
{'score': 0.9_955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
{'score': 0.9_988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9_987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
],
] , )
@require_torch
@slow
    def test_integration_torch_object_detection(self):
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'score': 0.9_982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
{'score': 0.9_960, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
{'score': 0.9_955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
{'score': 0.9_988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9_987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
] , )
        batch_outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(batch_outputs, decimals=4),
            [
[
{'score': 0.9_982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
{'score': 0.9_960, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
{'score': 0.9_955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
{'score': 0.9_988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9_987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
],
[
{'score': 0.9_982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
{'score': 0.9_960, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
{'score': 0.9_955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
{'score': 0.9_988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9_987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
],
] , )
@require_torch
@slow
    def test_threshold(self):
        threshold = 0.9985
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'score': 0.9_988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9_987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
] , )
@require_torch
@require_pytesseract
@slow
    def test_layoutlm(self):
        model_id = "Narsil/layoutlmv3-finetuned-funsd"
        threshold = 0.9993

        object_detector = pipeline("object-detection", model=model_id, threshold=threshold)
        outputs = object_detector(
            "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png"
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'score': 0.9_993, 'label': 'I-ANSWER', 'box': {'xmin': 2_94, 'ymin': 2_54, 'xmax': 3_43, 'ymax': 2_64}},
{'score': 0.9_993, 'label': 'I-ANSWER', 'box': {'xmin': 2_94, 'ymin': 2_54, 'xmax': 3_43, 'ymax': 2_64}},
] , )
| 355 |
"""simple docstring"""
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
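# "▁" (U+2581) is the SentencePiece word-boundary marker: tokens beginning with it start a
# new word, which is what the offset-mapping and detokenization logic below relies on.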
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

RESOURCE_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}

VOCAB_FILES_NAMES = {
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
    },
    "sentencepiece_model_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ernie-m-base": 514,
    "ernie-m-large": 514,
}

PRETRAINED_INIT_CONFIGURATION = {
    "ernie-m-base": {"do_lower_case": False},
    "ernie-m-large": {"do_lower_case": False},
}
class ErnieMTokenizer(PreTrainedTokenizer):
    # NOTE: class and attribute names reconstructed from the transformers ErnieM tokenizer
    model_input_names = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES

    def __init__(
        self,
        sentencepiece_model_ckpt,
        vocab_file=None,
        do_lower_case=False,
        encoding="utf8",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs=None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            vocab_file=vocab_file,
            encoding=encoding,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)

        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}
    def get_offset_mapping(self, text):
        # NOTE: `SP_CHAR_MAPPING` (a full-width -> half-width character map) is defined on the
        # class in the full upstream module; it is referenced but not shown in this excerpt.
        if text is None:
            return None

        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = "", []

        for i, ch in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize("NFKC", ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))

        text, token_mapping, offset = normalized_text, [], 0

        if self.do_lower_case:
            text = text.lower()

        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)

            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)

    def clean_text(self, text):
        return "".join(self.SP_CHAR_MAPPING.get(c, c) for c in text)
    def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
        if self.sp_model_kwargs.get("enable_sampling") is True:
            enable_sampling = True
        if self.sp_model_kwargs.get("alpha") is not None:
            alpha = self.sp_model_kwargs.get("alpha")
        if self.sp_model_kwargs.get("nbest_size") is not None:
            nbest_size = self.sp_model_kwargs.get("nbest_size")

        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
        new_pieces = []
        for pi, piece in enumerate(pieces):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE)
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk) or self.is_punct(chunk):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(chunk)
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
            if len(piece) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def convert_ids_to_string(self, ids):
        tokens = self.convert_ids_to_tokens(ids)
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.reverse_vocab.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep

    def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]

        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0) + 2) * [0]

        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)
    def is_ch_char(self, char):
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False

    def is_alpha(self, char):
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False

    def is_punct(self, char):
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False

    def is_whitespace(self, char):
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char) == 1:
            cat = unicodedata.category(char)
            if cat == "Zs":
                return True
        return False

    def load_vocab(self, filepath):
        token_to_idx = {}
        with io.open(filepath, "r", encoding="utf-8") as f:
            for index, line in enumerate(f):
                token = line.rstrip("\n")
                token_to_idx[token] = int(index)

        return token_to_idx

    def save_vocabulary(self, save_directory, filename_prefix=None):
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
        with open(tokenizer_model_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
        return (vocab_file,)
| 309 | 0 |
"""simple docstring"""
from __future__ import annotations
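# Greedy best-first search: always expand the open node with the lowest heuristic value
# (Manhattan distance to the goal). Unlike A*, the path cost g is not added to the
# priority, so the returned path is not guaranteed to be optimal.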
Path = list[tuple[int, int]]

grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        # Manhattan distance to the goal
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost
class GreedyBestFirst:
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99_999, None)

        self.open_nodes = [self.start]
        self.closed_nodes = []

        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = (0, 0)
SCREAMING_SNAKE_CASE__ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print("------")
SCREAMING_SNAKE_CASE__ = GreedyBestFirst(init, goal)
SCREAMING_SNAKE_CASE__ = greedy_bf.search()
if path:
for pos_x, pos_y in path:
SCREAMING_SNAKE_CASE__ = 2
for elem in grid:
print(elem)
| 46 |
"""simple docstring"""
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) # pylint: disable=invalid-name
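# `transformers-cli run` plumbing: a factory builds a `Pipeline` plus a `PipelineDataFormat`
# reader from the parsed CLI args, and `RunCommand.run` streams records through the
# pipeline and saves the outputs in the requested format.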
def try_infer_format_from_ext(path: str):
    if not path:
        return "pipe"

    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext

    raise Exception(
        f"Unable to determine file format from file extension {path}. "
        f"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}"
    )
def run_command_factory(args):
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    format = try_infer_format_from_ext(args.input) if args.format == "infer" else args.format
    reader = PipelineDataFormat.from_str(
        format=format,
        output_path=args.output,
        input_path=args.input,
        column=args.column if args.column else nlp.default_input_names,
        overwrite=args.overwrite,
    )
    return RunCommand(nlp, reader)
class RunCommand(BaseTransformersCLICommand):
    def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
        self._nlp = nlp
        self._reader = reader

    @staticmethod
    def register_subcommand(parser):
        run_parser = parser.add_parser("run", help="Run a pipeline through the CLI")
        run_parser.add_argument("--task", choices=get_supported_tasks(), help="Task to run")
        run_parser.add_argument("--input", type=str, help="Path to the file to use for inference")
        run_parser.add_argument("--output", type=str, help="Path to the file that will be used post to write results.")
        run_parser.add_argument("--model", type=str, help="Name or path to the model to instantiate.")
        run_parser.add_argument("--config", type=str, help="Name or path to the model's config to instantiate.")
        run_parser.add_argument(
            "--tokenizer", type=str, help="Name of the tokenizer to use. (default: same as the model name)"
        )
        run_parser.add_argument(
            "--column",
            type=str,
            help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)",
        )
        run_parser.add_argument(
            "--format",
            type=str,
            default="infer",
            choices=PipelineDataFormat.SUPPORTED_FORMATS,
            help="Input format to read from",
        )
        run_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        run_parser.add_argument("--overwrite", action="store_true", help="Allow overwriting the output file.")
        run_parser.set_defaults(func=run_command_factory)

    def run(self):
        nlp, outputs = self._nlp, []

        for entry in self._reader:
            output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output, dict):
                outputs.append(output)
            else:
                outputs += output

        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs)
            logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}")
        else:
            self._reader.save(outputs)
| 46 | 1 |
'''simple docstring'''
import os
lowercase__ : Any = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 1_00, '''D''': 5_00, '''M''': 10_00}
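# Subtractive notation: a symbol smaller than its right neighbour is subtracted,
# e.g. "IX" -> -1 + 10 = 9, while "XI" -> 10 + 1 = 11.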
def parse_roman_numerals(numerals: str) -> int:
    """Convert a Roman numeral string to an integer."""
    total_value = 0

    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]

    return total_value
def generate_roman_numerals(num: int) -> str:
    """Generate the minimal-form Roman numeral for an integer."""
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals
def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Project Euler 89: characters saved by rewriting the file's numerals in minimal form."""
    savings = 0

    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()

    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shortened = generate_roman_numerals(num)
        savings += len(original) - len(shortened)

    return savings
if __name__ == "__main__":
print(f"""{solution() = }""") | 190 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase__ : Optional[Any] = logging.get_logger(__name__)
lowercase__ : str = {
'''andreasmadsen/efficient_mlm_m0.40''': (
'''https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'''
),
}
class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = "roberta-prelayernorm"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 190 | 1 |
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
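# TextStreamer prints decoded tokens to stdout as they are generated; TextIteratorStreamer
# instead exposes them through a blocking iterator fed from a queue, which is why
# generation runs in a background Thread in the tests below.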
@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
| 4 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
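# `write_basic_config` probes the local hardware (CUDA, then XPU, then NPU) and writes a
# minimal single-machine Accelerate config file without asking any interactive questions.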
description = "Create a default config file for Accelerate with only a few flags set."


def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, use_xpu: bool = False):
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`."
        )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}"
        )

    config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        if num_gpus > 1:
            config["distributed_type"] = "MULTI_GPU"
        else:
            config["distributed_type"] = "NO"
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        if num_xpus > 1:
            config["distributed_type"] = "MULTI_XPU"
        else:
            config["distributed_type"] = "NO"
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        if num_npus > 1:
            config["distributed_type"] = "MULTI_NPU"
        else:
            config["distributed_type"] = "NO"
    else:
        num_gpus = 0
        config["use_cpu"] = True
        config["num_processes"] = 1
        config["distributed_type"] = "NO"
    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path


def default_command_parser(parser, parents):
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help=(
            "Whether or not to use mixed precision training. "
            "Choose between FP16 and BF16 (bfloat16) training. "
            "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later."
        ),
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser


def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
| 195 | 0 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
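# Builds whole-word-masking reference files for Chinese: LTP provides word segmentation,
# and BERT subwords that fall inside an LTP word are re-marked with "##" so the masking
# code can treat the whole word as a single maskable unit.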
def _is_chinese_char(cp):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0x4_e_0_0 and cp <= 0x9_f_f_f)
or (cp >= 0x3_4_0_0 and cp <= 0x4_d_b_f) #
or (cp >= 0x2_0_0_0_0 and cp <= 0x2_a_6_d_f) #
or (cp >= 0x2_a_7_0_0 and cp <= 0x2_b_7_3_f) #
or (cp >= 0x2_b_7_4_0 and cp <= 0x2_b_8_1_f) #
or (cp >= 0x2_b_8_2_0 and cp <= 0x2_c_e_a_f) #
or (cp >= 0xf_9_0_0 and cp <= 0xf_a_f_f)
or (cp >= 0x2_f_8_0_0 and cp <= 0x2_f_a_1_f) #
): #
return True
return False
def __lowerCAmelCase (__lowerCAmelCase ):
# word like '180' or '身高' or '神'
for char in word:
_UpperCAmelCase : Union[str, Any] = ord(__lowerCAmelCase )
if not _is_chinese_char(__lowerCAmelCase ):
return 0
return 1
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : Tuple = set()
for token in tokens:
_UpperCAmelCase : Union[str, Any] = len(__lowerCAmelCase ) > 1 and is_chinese(__lowerCAmelCase )
if chinese_word:
word_set.add(__lowerCAmelCase )
_UpperCAmelCase : str = list(__lowerCAmelCase )
return word_list
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
if not chinese_word_set:
return bert_tokens
_UpperCAmelCase : int = max([len(__lowerCAmelCase ) for w in chinese_word_set] )
_UpperCAmelCase : Optional[int] = bert_tokens
_UpperCAmelCase : Dict = 0, len(__lowerCAmelCase )
while start < end:
_UpperCAmelCase : List[Any] = True
if is_chinese(bert_word[start] ):
_UpperCAmelCase : Optional[Any] = min(end - start , __lowerCAmelCase )
for i in range(__lowerCAmelCase , 1 , -1 ):
_UpperCAmelCase : Tuple = "".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
_UpperCAmelCase : Any = "##" + bert_word[j]
_UpperCAmelCase : List[Any] = start + i
_UpperCAmelCase : Dict = False
break
if single_word:
start += 1
return bert_word
def prepare_ref(lines, ltp_tokenizer, bert_tokenizer):
    ltp_res = []

    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save the positions of Chinese subwords that start with ##,
        # i.e. pieces that continue a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    # For Chinese (Ro)Bert, the best result is from RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these models, we have to use the same tokenizer, LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster on a GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='prepare_chinese_ref')
    parser.add_argument(
        '--file_name',
        required=False,
        type=str,
        default='./resources/chinese-demo.txt',
        help='the file to process; the same text as the LM training data',
    )
    parser.add_argument(
        '--ltp',
        required=False,
        type=str,
        default='./resources/ltp',
        help='resources for the LTP tokenizer, usually a path',
    )
    parser.add_argument(
        '--bert',
        required=False,
        type=str,
        default='./resources/robert',
        help='resources for the Bert tokenizer',
    )
    parser.add_argument(
        '--save_path',
        required=False,
        type=str,
        default='./resources/ref.txt',
        help='the path where the result will be saved',
    )
    args = parser.parse_args()
main(args)
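# A hypothetical invocation (the script file name is an assumption; all paths
# are the defaults declared above):
#
#   python prepare_chinese_ref.py --file_name ./resources/chinese-demo.txt \
#       --ltp ./resources/ltp --bert ./resources/robert --save_path ./resources/ref.txt
#
# Each output line is a JSON list with the positions of subword pieces that
# continue a whole Chinese word -- the reference needed for whole-word masking.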
| 364 |
"""ResNet model configuration."""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
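# Example (a sketch, not part of the original file): the default arguments
# reproduce a ResNet-50-style layout, so ResNetConfig() yields
# stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"], while an
# unknown layer_type such as "plain" raises the ValueError in __init__.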
| 322 | 0 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
# Class name inferred from the Swin2SR-style pad-to-multiple-of-8 preprocessing.
class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None):
        # Pad the bottom and right edges up to the next multiple of `size`,
        # mirroring edge pixels ("symmetric" mode).
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)
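    # Padding example: a 253x253 input with size=8 becomes 256x256, since
    # (253 // 8 + 1) * 8 = 256. Note that an already-aligned dimension still
    # grows by a full block (256 -> 264), which is how this formula behaves.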
    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
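# A minimal usage sketch (the class name above is an inference from the
# pad-to-multiple-of-8 behaviour; `pil_image` is any PIL.Image.Image):
#
#   processor = Swin2SRImageProcessor()
#   batch = processor.preprocess(pil_image, return_tensors="np")
#   # batch["pixel_values"][0] is channels-first, rescaled to [0, 1],
#   # with height and width padded up to multiples of 8.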
| 188 |
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)
def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")
def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)

    return "".join(translated)
if __name__ == "__main__":
    main()
| 233 | 0 |
"""DeBERTa-v2 model configuration."""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
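# Example (sketch): DebertaV2Config() reproduces the deberta-v2-xlarge layout
# (hidden_size=1536, 24 layers, 24 attention heads). Because type_vocab_size
# defaults to 0, the ONNX config above omits token_type_ids from its inputs
# and strips them from the generated dummy inputs.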
| 346 |
"""A platform independent file lock that supports the with-statement."""
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
    warnings = None
try:
import msvcrt
except ImportError:
    msvcrt = None
try:
import fcntl
except ImportError:
    fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
    TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"


_logger = None
def logger():
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp
class _Acquire_ReturnProxy:
    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self.timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None
    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None
    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)
    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")

        return None
    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path: str, max_length: int) -> str:
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    """Uses the msvcrt.locking function to hard lock the lock file on Windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    """Uses fcntl.flock to hard lock the lock file on unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)

        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        # Do not remove the lockfile:
        #
        #   https://github.com/benediktschmitt/py-filelock/issues/31
        #   https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None

if msvcrt:
    FileLock = WindowsFileLock

elif fcntl:
    FileLock = UnixFileLock

else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
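# A minimal usage sketch -- the lock-file name is illustrative. FileLock is
# re-entrant and supports the with-statement:
#
#   lock = FileLock("my_resource.txt.lock", timeout=5)
#   with lock:            # acquires, or raises Timeout after ~5 seconds
#       with lock:        # nested acquire only increments the counter
#           ...           # critical section
#   # the OS-level lock is released when the outermost block exits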
| 346 | 1 |