'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about a document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")

        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
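
# Usage sketch (illustrative, not part of the original module). The Tool API surface
# varies across transformers versions, and "invoice.png" is a hypothetical file:
#
#   from PIL import Image
#   tool = DocumentQuestionAnsweringTool()
#   answer = tool(document=Image.open("invoice.png"), question="What is the invoice number?")
#   print(answer)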
'''simple docstring'''
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)


# Adjacency list of graph
data = {
    0: [1, 2],
    1: [0, 2],
    2: [0, 1, 3, 5],
    3: [2, 4],
    4: [3],
    5: [2, 6, 8],
    6: [5, 7],
    7: [6, 8],
    8: [5, 7],
}
compute_ap(data)
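
# A brute-force cross-check (illustrative, not part of the original script): a vertex is an
# articulation point iff deleting it increases the number of connected components. For the
# sample graph above, the cut vertices are 2, 3 and 5.
def brute_force_aps(graph):
    def n_components(g, skip=None):
        seen, comps = set(), 0
        for start in g:
            if start == skip or start in seen:
                continue
            comps += 1
            stack = [start]
            while stack:
                u = stack.pop()
                if u in seen:
                    continue
                seen.add(u)
                stack.extend(w for w in g[u] if w != skip)
        return comps

    base = n_components(graph)
    return [v for v in graph if n_components(graph, skip=v) > base]


print(brute_force_aps(data))  # -> [2, 3, 5]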
'''simple docstring'''
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
torch.backends.cuda.matmul.allow_tf32 = False


class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )

        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # the two schedulers share the same training dynamics, so the final noisy samples
        # and noise predictions should match
        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
'''simple docstring'''
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear",
    "self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed",
    "self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "ctc_proj",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "ctc_proj",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoints
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)

    recursively_load_weights(model, hf_wavlm)

    hf_wavlm.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
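
# Example invocation (illustrative; the paths are hypothetical):
#
#   python convert_wavlm_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./WavLM-Base.pt \
#       --pytorch_dump_folder_path ./wavlm-base-converted
#
# The resulting folder can then be loaded with `WavLMModel.from_pretrained(...)`.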
'''simple docstring'''
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping


def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])

    return None, "\n".join(full_content)


class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}

        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")


known_task_ids = {
    "image-classification": [],
    "translation": [],
    "image-segmentation": [],
    "fill-mask": [],
    "automatic-speech-recognition": [],
    "token-classification": [],
    "sentence-similarity": [],
    "audio-classification": [],
    "question-answering": [],
    "summarization": [],
    "zero-shot-classification": [],
    "table-to-text": [],
    "feature-extraction": [],
    "other": [],
    "multiple-choice": [],
    "text-classification": [],
    "text-to-image": [],
    "text2text-generation": [],
    "zero-shot-image-classification": [],
    "tabular-classification": [],
    "tabular-regression": [],
    "image-to-image": [],
    "tabular-to-text": [],
    "unconditional-image-generation": [],
    "text-retrieval": [],
    "text-to-speech": [],
    "object-detection": [],
    "audio-to-audio": [],
    "text-generation": [],
    "conversational": [],
    "table-question-answering": [],
    "visual-question-answering": [],
    "image-to-text": [],
    "reinforcement-learning": [],
    "voice-activity-detection": [],
    "time-series-forecasting": [],
    "document-question-answering": [],
}

if __name__ == "__main__":
    from argparse import ArgumentParser

    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
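
# Usage sketch (illustrative; "README.md" is a hypothetical dataset card path):
#
#   metadata = DatasetMetadata.from_readme(Path("README.md"))
#   metadata["license"] = ["mit"]
#   metadata.to_readme(Path("README.md"))  # rewrites the YAML block in place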
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNet1DOutput(BaseOutput):
    """
    Args:
        sample (`torch.FloatTensor` of shape `(batch_size, num_channels, sample_size)`):
            Hidden states output of the last layer of the model.
    """

    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        sample_size: int = 65536,
        sample_rate: Optional[int] = None,
        in_channels: int = 2,
        out_channels: int = 2,
        extra_in_channels: int = 0,
        time_embedding_type: str = "fourier",
        flip_sin_to_cos: bool = True,
        use_timestep_embedding: bool = False,
        freq_shift: float = 0.0,
        down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type: Tuple[str] = "UNetMidBlock1D",
        out_block_type: str = None,
        block_out_channels: Tuple[int] = (32, 32, 64),
        act_fn: str = None,
        norm_num_groups: int = 8,
        layers_per_block: int = 1,
        downsample_each_block: bool = False,
    ):
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim,
                time_embed_dim=time_embed_dim,
                act_fn=act_fn,
                out_dim=block_out_channels[0],
            )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]

            if i == 0:
                input_channel += extra_in_channels

            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_downsample=not is_final_block or downsample_each_block,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type,
            in_channels=block_out_channels[-1],
            mid_channels=block_out_channels[-1],
            out_channels=block_out_channels[-1],
            embed_dim=block_out_channels[0],
            num_layers=layers_per_block,
            add_downsample=downsample_each_block,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]

        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type,
            num_groups_out=num_groups_out,
            embed_dim=block_out_channels[0],
            out_channels=out_channels,
            act_fn=act_fn,
            fc_dim=block_out_channels[-1] // 4,
        )

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        return_dict: bool = True,
    ) -> Union[UNet1DOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
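
# Usage sketch (illustrative; assumes the default Dance Diffusion-style config is valid
# for this sample length):
#
#   model = UNet1DModel(in_channels=2, out_channels=2)
#   sample = torch.randn(1, 2, 65536)          # (batch, channels, length)
#   timestep = torch.tensor([10])
#   denoised = model(sample, timestep).sample  # same shape as `sample`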
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
    @require_torch
    def test_offline_mode(self):
        # python one-liner segments: `load` must run before socket.socket is monkey-patched by `mock`
        load = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n"
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n'
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n'

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_no_internet(self):
        # python one-liner segments
        load = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n"
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n'
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n'

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        load = "\nfrom transformers import BertConfig, BertModel, BertTokenizer\n"
        run = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n'
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n'

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = "\nfrom transformers import pipeline\n"
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n'
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n'
        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode",
            result.stderr.decode().replace("\n", ""),
        )

    @require_torch
    def test_offline_model_dynamic_model(self):
        load = "\nfrom transformers import AutoModel\n"
        run = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n'

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
'''simple docstring'''
def hamming_distance(string1: str, string2: str) -> int:
    """Calculate the Hamming distance between two equal-length strings.

    >>> hamming_distance("python", "python")
    0
    >>> hamming_distance("karolin", "kathrin")
    3
    """
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")

    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1

    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
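
# Illustrative check, not part of the original module: two binary strings
# differing in exactly two positions.
assert hamming_distance("0110101", "0110011") == 2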
'''simple docstring'''
class Matrix:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
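
# Illustrative usage, not part of the original module: the two diagonal blobs merge
# into a single island because diagonal neighbours count as connected.
example_grid = [
    [1, 1, 0, 0],
    [0, 1, 0, 0],
    [0, 0, 1, 0],
    [0, 0, 0, 0],
]
print(Matrix(4, 4, example_grid).count_islands())  # -> 1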
'''simple docstring'''
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    # convert configuration
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint-repo",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
'''simple docstring'''
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForPreTraining,
    is_apex_available,
    trainer_utils,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices


if is_apex_available():
    from apex import amp

if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to pretrain.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    verbose_logging: Optional[bool] = field(
        default=False,
        metadata={"help": "Whether to log verbose messages or not."},
    )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."}
    )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."}
    )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={"help": "Decay of gumbel temperature during training."}
    )


def configure_logger(model_args: ModelArguments, training_args: TrainingArguments):
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="file",
        metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"}
    )


@dataclass
class DataCollatorForWav2Vec2Pretraining:
    """
    Data collator that dynamically pads the received inputs and prepares masked indices for pretraining.
    """

    model: Wav2Vec2ForPreTraining
    feature_extractor: Wav2Vec2FeatureExtractor
    padding: Union[bool, str] = "longest"
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        batch = self.feature_extractor.pad(
            features,
            max_length=self.max_length,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])

        batch_size = batch["input_values"].shape[0]

        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long
            )

            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device
            )

            # these two operations makes sure that all values
            # before the output lengths indices are attended to
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=batch["input_values"].device), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()

        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length),
            self.model.config.mask_time_prob,
            self.model.config.mask_time_length,
            attention_mask=attention_mask,
            min_masks=2,
        )

        return batch


class Wav2Vec2PreTrainer(Trainer):
    """
    Subclassed Trainer for Wav2Vec2-like pretraining: decays the gumbel softmax temperature during training.
    """

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )

        return loss.detach()


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]",
            cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split="validation",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}",
            cache_dir=model_args.cache_dir,
        )

    # only normalized-inputs-training is supported
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names
    )

    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets["train"].column_names,
    )

    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = Wav2Vec2Config.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        gradient_checkpointing=training_args.gradient_checkpointing,
    )

    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'"
        )

    model = Wav2Vec2ForPreTraining(config)

    data_collator = DataCollatorForWav2Vec2Pretraining(model=model, feature_extractor=feature_extractor)

    trainer = Wav2Vec2PreTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        train_dataset=vectorized_datasets["train"],
        eval_dataset=vectorized_datasets["validation"],
        tokenizer=feature_extractor,
        max_gumbel_temp=model_args.max_gumbel_temperature,
        min_gumbel_temp=model_args.min_gumbel_temperature,
        gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()


if __name__ == "__main__":
    main()
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)


@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        # replace `GenerationConfig` values by plain dictionaries for JSON serialization support
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
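
# Usage sketch (illustrative; "out" is a hypothetical output directory):
#
#   args = Seq2SeqTrainingArguments(output_dir="out", predict_with_generate=True, generation_num_beams=4)
#   args.to_dict()["generation_num_beams"]  # -> 4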
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_blip_2": [
        "BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Blip2Config",
        "Blip2QFormerConfig",
        "Blip2VisionConfig",
    ],
    "processing_blip_2": ["Blip2Processor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip_2"] = [
        "BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Blip2Model",
        "Blip2QFormerModel",
        "Blip2PreTrainedModel",
        "Blip2ForConditionalGeneration",
        "Blip2VisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}


class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
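
# Usage sketch (illustrative): the default config exposes per-encoder depths
# through the `num_hidden_layers` mapping built above.
#
#   config = LxmertConfig()
#   config.num_hidden_layers  # -> {'vision': 5, 'cross_encoder': 5, 'language': 9}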
'''simple docstring'''
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()

        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
'''simple docstring'''
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    """
    Count the minimum number of perfect squares that sum to `number`.

    >>> minimum_squares_to_represent_a_number(25)
    1
    >>> minimum_squares_to_represent_a_number(21)
    3
    >>> minimum_squares_to_represent_a_number(58)
    2
    """
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
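
# Illustrative checks, not part of the original module (Lagrange's four-square
# theorem guarantees the answer is at most 4):
assert minimum_squares_to_represent_a_number(13) == 2  # 13 = 4 + 9
assert minimum_squares_to_represent_a_number(7) == 4   # 7 = 4 + 1 + 1 + 1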
'''simple docstring'''
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    """
    >>> value = [1, 3, 5, 7, 9]
    >>> weight = [0.9, 0.7, 0.5, 0.3, 0.1]
    >>> fractional_knapsack(value, weight, 5)
    (25, [1, 1, 1, 1, 1])
    """
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
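
# Illustrative usage, not part of the original module: the two best value/weight
# ratios are taken whole, the third item fractionally.
max_value, fractions = fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
print(max_value, fractions)  # -> 240.0 [1, 1, 0.6666666666666666]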
'''simple docstring'''
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def _lowerCAmelCase ( __snake_case : Dict , __snake_case : List[Any] , __snake_case : Optional[Any] , __snake_case : Optional[int] ) -> List[Any]:
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), f'Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), f'Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'
def _lowerCAmelCase ( __snake_case : str , __snake_case : str , __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : str=True ) -> Dict:
model.train()
__A : List[Any] = model(__snake_case )
__A : int = F.mse_loss(__snake_case , target.to(output.device ) )
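    # When bypassing `accelerator.backward`, the loss is scaled by the number
    # of accumulation steps by hand, mirroring what `accelerator.backward`
    # does internally inside an accumulation context.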
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(__snake_case )
def _lowerCAmelCase ( __snake_case : Dict , __snake_case : Tuple=False ) -> Dict:
set_seed(42 )
__A : str = RegressionModel()
__A : Dict = deepcopy(__snake_case )
__A : Union[str, Any] = RegressionDataset(length=80 )
__A : List[Any] = DataLoader(__snake_case , batch_size=16 )
model.to(accelerator.device )
if sched:
__A : List[Any] = AdamW(params=model.parameters() , lr=1e-3 )
__A : List[Any] = AdamW(params=ddp_model.parameters() , lr=1e-3 )
__A : Dict = LambdaLR(__snake_case , lr_lambda=lambda __snake_case : epoch**0.65 )
__A : Union[str, Any] = LambdaLR(__snake_case , lr_lambda=lambda __snake_case : epoch**0.65 )
# Make a copy of `model`
if sched:
__A ,__A ,__A ,__A : Optional[int] = accelerator.prepare(__snake_case , __snake_case , __snake_case , __snake_case )
else:
__A ,__A : int = accelerator.prepare(__snake_case , __snake_case )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def _lowerCAmelCase ( __snake_case : List[Any] ) -> List[Any]:
# Test when on a single CPU or GPU that the context manager does nothing
__A ,__A ,__A : Dict = get_training_setup(__snake_case )
# Use a single batch
__A ,__A : Optional[Any] = next(iter(__snake_case ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
__A ,__A : int = accelerator.gather((ddp_input, ddp_target) )
__A ,__A : List[Any] = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(__snake_case , __snake_case , __snake_case , __snake_case )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(__snake_case ):
step_model(__snake_case , __snake_case , __snake_case , __snake_case )
else:
# Sync grads
step_model(__snake_case , __snake_case , __snake_case , __snake_case )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(__snake_case , __snake_case , __snake_case , __snake_case )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), f'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
__A : Union[str, Any] = ddp_input[torch.randperm(len(__snake_case ) )]
def _lowerCAmelCase ( __snake_case : Any ) -> Dict:
# Test on distributed setup that context manager behaves properly
__A ,__A ,__A : Tuple = get_training_setup(__snake_case )
# Use a single batch
__A ,__A : Any = next(iter(__snake_case ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
__A ,__A : Any = accelerator.gather((ddp_input, ddp_target) )
__A ,__A : Tuple = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(__snake_case , __snake_case , __snake_case , __snake_case )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(__snake_case ):
step_model(__snake_case , __snake_case , __snake_case , __snake_case )
else:
# Sync grads
step_model(__snake_case , __snake_case , __snake_case , __snake_case )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f'Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
__A : Any = ddp_input[torch.randperm(len(__snake_case ) )]
def _lowerCAmelCase ( __snake_case : str=False , __snake_case : Any=False ) -> Dict:
__A : Optional[Any] = Accelerator(
split_batches=__snake_case , dispatch_batches=__snake_case , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
__A ,__A ,__A : Tuple = get_training_setup(__snake_case )
for iteration, batch in enumerate(__snake_case ):
__A ,__A : List[Any] = batch.values()
# Gather the distributed inputs and targs for the base model
__A ,__A : Dict = accelerator.gather((ddp_input, ddp_target) )
__A ,__A : str = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(__snake_case ):
step_model(__snake_case , __snake_case , __snake_case , __snake_case )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(__snake_case ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f'Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f'Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
__A : List[Any] = ddp_input[torch.randperm(len(__snake_case ) )]
GradientState._reset_state()
def _lowerCAmelCase ( __snake_case : Optional[int]=False , __snake_case : str=False ) -> Union[str, Any]:
__A : int = Accelerator(
split_batches=__snake_case , dispatch_batches=__snake_case , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
__A ,__A ,__A ,__A ,__A ,__A ,__A : Union[str, Any] = get_training_setup(__snake_case , __snake_case )
for iteration, batch in enumerate(__snake_case ):
__A ,__A : Union[str, Any] = batch.values()
# Gather the distributed inputs and targs for the base model
__A ,__A : Tuple = accelerator.gather((ddp_input, ddp_target) )
__A ,__A : Optional[Any] = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(__snake_case )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(__snake_case ):
step_model(__snake_case , __snake_case , __snake_case , __snake_case )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
__A : Union[str, Any] = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(__snake_case ))
if accelerator.num_processes > 1:
check_model_parameters(__snake_case , __snake_case , __snake_case , __snake_case )
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
GradientState._reset_state()
def _lowerCAmelCase ( ) -> Dict:
__A : Tuple = Accelerator()
__A : Tuple = RegressionDataset(length=80 )
__A : str = DataLoader(__snake_case , batch_size=16 )
__A : Tuple = RegressionDataset(length=96 )
__A : Tuple = DataLoader(__snake_case , batch_size=16 )
__A ,__A : List[str] = accelerator.prepare(__snake_case , __snake_case )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(__snake_case ):
assert id(accelerator.gradient_state.active_dataloader ) == id(__snake_case )
if iteration < len(__snake_case ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(__snake_case ):
assert id(accelerator.gradient_state.active_dataloader ) == id(__snake_case )
if batch_num < len(__snake_case ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def _lowerCAmelCase ( ) -> Any:
__A : List[Any] = Accelerator()
__A : List[str] = accelerator.state
if state.local_process_index == 0:
print('**Test `accumulate` gradient accumulation with dataloader break**' )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print('**Test NOOP `no_sync` context manager**' )
test_noop_sync(__snake_case )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print('**Test Distributed `no_sync` context manager**' )
test_distributed_sync(__snake_case )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
'**Test `accumulate` gradient accumulation, ' , f'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , )
test_gradient_accumulation(__snake_case , __snake_case )
    # Currently will break on torch 2.0+, need to investigate why
if is_torch_version('<' , '2.0' ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
'**Test `accumulate` gradient accumulation with optimizer and scheduler, ' , '`split_batches=False`, `dispatch_batches=False`**' , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
'**Test `accumulate` gradient accumulation with optimizer and scheduler, ' , f'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , )
test_gradient_accumulation_with_opt_and_scheduler(__snake_case , __snake_case )
def _lowerCAmelCase ( __snake_case : Optional[Any] ) -> Optional[int]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main() | 8 |
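
# A minimal sketch of the core pattern the tests above exercise, reduced to a
# canonical training loop (model/optimizer/dataloader/loss_fn are placeholder
# names, not objects defined in this file):
from accelerate import Accelerator


def minimal_accumulation_loop(model, optimizer, dataloader, loss_fn):
    accelerator = Accelerator(gradient_accumulation_steps=2)
    model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
    for inputs, targets in dataloader:
        # Gradients sync and the optimizer steps only on accumulation
        # boundaries; other batches accumulate locally.
        with accelerator.accumulate(model):
            loss = loss_fn(model(inputs), targets)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
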
'''simple docstring'''
from __future__ import annotations
import math
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase):
'''simple docstring'''
__A : int = size
# approximate the overall size of segment tree with given value
__A : Optional[Any] = [0 for i in range(0 , 4 * size)]
# create array to store lazy update
__A : Optional[Any] = [0 for i in range(0 , 4 * size)]
__A : str = [0 for i in range(0 , 4 * size)] # flag for lazy update
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
return idx * 2
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
return idx * 2 + 1
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
if left_element == right_element:
__A : List[Any] = a[left_element - 1]
else:
__A : List[str] = (left_element + right_element) // 2
self.build(self.left(_UpperCAmelCase) , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
self.build(self.right(_UpperCAmelCase) , mid + 1 , _UpperCAmelCase , _UpperCAmelCase)
__A : Any = max(
self.segment_tree[self.left(_UpperCAmelCase)] , self.segment_tree[self.right(_UpperCAmelCase)])
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
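        # Lazy propagation: if this node carries a pending range assignment,
        # apply it here and push it down to both children before recursing.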
if self.flag[idx] is True:
__A : Optional[Any] = self.lazy[idx]
__A : Optional[Any] = False
if left_element != right_element:
__A : List[Any] = self.lazy[idx]
__A : Dict = self.lazy[idx]
__A : Tuple = True
__A : Union[str, Any] = True
if right_element < a or left_element > b:
return True
if left_element >= a and right_element <= b:
__A : Optional[int] = val
if left_element != right_element:
__A : Tuple = val
__A : Any = val
__A : Tuple = True
__A : Union[str, Any] = True
return True
__A : str = (left_element + right_element) // 2
self.update(self.left(_UpperCAmelCase) , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
self.update(self.right(_UpperCAmelCase) , mid + 1 , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
__A : int = max(
self.segment_tree[self.left(_UpperCAmelCase)] , self.segment_tree[self.right(_UpperCAmelCase)])
return True
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
if self.flag[idx] is True:
__A : Union[str, Any] = self.lazy[idx]
__A : List[str] = False
if left_element != right_element:
__A : Union[str, Any] = self.lazy[idx]
__A : Optional[int] = self.lazy[idx]
__A : str = True
__A : Union[str, Any] = True
if right_element < a or left_element > b:
return -math.inf
if left_element >= a and right_element <= b:
return self.segment_tree[idx]
__A : Any = (left_element + right_element) // 2
__A : int = self.query(self.left(_UpperCAmelCase) , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
__A : Union[str, Any] = self.query(self.right(_UpperCAmelCase) , mid + 1 , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
return max(_UpperCAmelCase , _UpperCAmelCase)
def __str__( self):
'''simple docstring'''
return str([self.query(1 , 1 , self.size , _UpperCAmelCase , _UpperCAmelCase) for i in range(1 , self.size + 1)])
if __name__ == "__main__":
lowercase__ : Union[str, Any] = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
lowercase__ : str = 15
lowercase__ : List[Any] = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 1_11)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 2_35)
print(segt) | 8 | 1 |
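
# A brute-force reference for the lazy segment tree above (range-assign plus
# range-max on a plain list), handy for spot checks; the class and names below
# are illustrative only.
class NaiveRangeMax:
    def __init__(self, data):
        self.data = list(data)

    def update(self, left, right, val):  # assign val on the inclusive 1-based range
        for i in range(left - 1, right):
            self.data[i] = val

    def query(self, left, right):  # max on the inclusive 1-based range
        return max(self.data[left - 1 : right])


naive = NaiveRangeMax([1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8])
print(naive.query(4, 6))   # 7, matching segt.query(1, 1, size, 4, 6)
naive.update(1, 3, 111)
print(naive.query(1, 15))  # 111, matching segt.query(1, 1, size, 1, 15)
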
'''simple docstring'''
def _lowerCAmelCase ( __snake_case : int , __snake_case : int ) -> str:
if number < 0 or shift_amount < 0:
raise ValueError('both inputs must be positive integers' )
__A : Optional[Any] = str(bin(__snake_case ) )
binary_number += "0" * shift_amount
return binary_number
def _lowerCAmelCase ( __snake_case : int , __snake_case : int ) -> str:
if number < 0 or shift_amount < 0:
raise ValueError('both inputs must be positive integers' )
__A : List[str] = str(bin(__snake_case ) )[2:]
if shift_amount >= len(__snake_case ):
return "0b0"
__A : Any = binary_number[: len(__snake_case ) - shift_amount]
return "0b" + shifted_binary_number
def _lowerCAmelCase ( __snake_case : int , __snake_case : int ) -> str:
if number >= 0: # Get binary representation of positive number
__A : Optional[Any] = '0' + str(bin(__snake_case ) ).strip('-' )[2:]
else: # Get binary (2's complement) representation of negative number
__A : Union[str, Any] = len(bin(__snake_case )[3:] ) # Find 2's complement of number
__A : Any = bin(abs(__snake_case ) - (1 << binary_number_length) )[3:]
__A : Optional[Any] = (
'1' + '0' * (binary_number_length - len(__snake_case )) + binary_number
)
if shift_amount >= len(__snake_case ):
return "0b" + binary_number[0] * len(__snake_case )
return (
"0b"
+ binary_number[0] * shift_amount
+ binary_number[: len(__snake_case ) - shift_amount]
)
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 |
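
# The same three shifts cross-checked with Python's built-in operators; note
# that `>>` on a negative int is already an arithmetic (sign-extending) shift.
print(bin(0b1101 << 1))  # 0b11010 -- logical left shift
print(bin(0b1101 >> 1))  # 0b110   -- logical right shift of a non-negative value
print(-8 >> 1)           # -4      -- arithmetic right shift preserves the sign
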
'''simple docstring'''
def _lowerCAmelCase ( __snake_case : int , __snake_case : int , __snake_case : int ) -> float:
__A : Dict = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
# formula for sum of series
return total
def _lowerCAmelCase ( ) -> Union[str, Any]:
print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 | 1 |
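
# The closed form used above, S = n/2 * (2a + (n - 1)d), checked against an
# explicit sum for a = 1, d = 1, n = 10 (the call made in main() above):
n, a, d = 10, 1, 1
assert (n / 2) * (2 * a + (n - 1) * d) == sum(a + k * d for k in range(n)) == 55
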
'''simple docstring'''
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def _lowerCAmelCase ( __snake_case : int , __snake_case : Tuple ) -> List[str]:
__A : Tuple = XCLIPTextConfig()
# derive patch size from model name
__A : Dict = model_name.find('patch' )
__A : Dict = int(model_name[start_idx + len('patch' ) : start_idx + len('patch' ) + 2] )
__A : List[str] = XCLIPVisionConfig(patch_size=__snake_case , num_frames=__snake_case )
if "large" in model_name:
__A : Dict = 7_68
__A : List[Any] = 30_72
__A : int = 12
__A : Tuple = 10_24
__A : str = 40_96
__A : Any = 16
__A : str = 24
__A : Dict = 7_68
__A : Any = 30_72
if model_name == "xclip-large-patch14-16-frames":
__A : List[str] = 3_36
__A : List[str] = XCLIPConfig.from_text_vision_configs(__snake_case , __snake_case )
if "large" in model_name:
__A : List[str] = 7_68
return config
def _lowerCAmelCase ( __snake_case : Optional[Any] ) -> Dict:
# text encoder
if name == "token_embedding.weight":
__A : str = name.replace('token_embedding.weight' , 'text_model.embeddings.token_embedding.weight' )
if name == "positional_embedding":
__A : Any = name.replace('positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
if "ln_1" in name:
__A : List[Any] = name.replace('ln_1' , 'layer_norm1' )
if "ln_2" in name:
__A : Union[str, Any] = name.replace('ln_2' , 'layer_norm2' )
if "c_fc" in name:
__A : Any = name.replace('c_fc' , 'fc1' )
if "c_proj" in name:
__A : Optional[Any] = name.replace('c_proj' , 'fc2' )
if name.startswith('transformer.resblocks' ):
__A : Optional[Any] = name.replace('transformer.resblocks' , 'text_model.encoder.layers' )
if "attn.out_proj" in name and "message" not in name:
__A : Union[str, Any] = name.replace('attn.out_proj' , 'self_attn.out_proj' )
if "ln_final" in name:
__A : str = name.replace('ln_final' , 'text_model.final_layer_norm' )
# visual encoder
if name == "visual.class_embedding":
__A : Optional[int] = name.replace('visual.class_embedding' , 'vision_model.embeddings.class_embedding' )
if name == "visual.positional_embedding":
__A : Optional[Any] = name.replace('visual.positional_embedding' , 'vision_model.embeddings.position_embedding.weight' )
if name.startswith('visual.transformer.resblocks' ):
__A : Tuple = name.replace('visual.transformer.resblocks' , 'vision_model.encoder.layers' )
if "visual.conv1" in name:
__A : int = name.replace('visual.conv1' , 'vision_model.embeddings.patch_embedding' )
if "visual.ln_pre" in name:
__A : List[Any] = name.replace('visual.ln_pre' , 'vision_model.pre_layernorm' )
if "visual.ln_post" in name:
__A : List[Any] = name.replace('visual.ln_post' , 'vision_model.post_layernorm' )
if "visual.proj" in name:
__A : Optional[int] = name.replace('visual.proj' , 'visual_projection.weight' )
if "text_projection" in name:
__A : Any = name.replace('text_projection' , 'text_projection.weight' )
# things on top
if "prompts_visual_proj" in name:
__A : List[Any] = name.replace('prompts_visual_proj' , 'prompts_visual_projection' )
if "prompts_visual_ln" in name:
__A : int = name.replace('prompts_visual_ln' , 'prompts_visual_layernorm' )
# mit
if name == "mit.positional_embedding":
__A : Union[str, Any] = name.replace('positional' , 'position' )
if name.startswith('mit.resblocks' ):
__A : Optional[int] = name.replace('mit.resblocks' , 'mit.encoder.layers' )
# prompts generator
if name.startswith('prompts_generator.norm' ):
__A : Any = name.replace('prompts_generator.norm' , 'prompts_generator.layernorm' )
return name
def _lowerCAmelCase ( __snake_case : Optional[int] , __snake_case : Optional[int] ) -> Tuple:
for key in orig_state_dict.copy().keys():
__A : Dict = orig_state_dict.pop(__snake_case )
if "attn.in_proj" in key:
__A : Dict = key.split('.' )
if key.startswith('visual' ):
__A : Dict = key_split[3]
__A : Tuple = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
__A : Dict = val[
:dim, :
]
__A : Tuple = val[
dim : dim * 2, :
]
__A : Optional[Any] = val[
-dim:, :
]
else:
__A : Optional[Any] = val[
:dim
]
__A : Optional[int] = val[
dim : dim * 2
]
__A : Tuple = val[
-dim:
]
else:
if "weight" in key:
__A : Dict = val[
:dim, :
]
__A : Optional[Any] = val[
dim : dim * 2, :
]
__A : Any = val[
-dim:, :
]
else:
__A : Union[str, Any] = val[:dim]
__A : Union[str, Any] = val[
dim : dim * 2
]
__A : Optional[int] = val[-dim:]
elif key.startswith('mit' ):
__A : List[str] = key_split[2]
__A : Optional[Any] = config.vision_config.mit_hidden_size
if "weight" in key:
__A : Optional[Any] = val[:dim, :]
__A : Optional[Any] = val[dim : dim * 2, :]
__A : List[Any] = val[-dim:, :]
else:
__A : str = val[:dim]
__A : Dict = val[dim : dim * 2]
__A : Tuple = val[-dim:]
else:
__A : Union[str, Any] = key_split[2]
__A : Optional[Any] = config.text_config.hidden_size
if "weight" in key:
__A : List[str] = val[:dim, :]
__A : int = val[
dim : dim * 2, :
]
__A : Optional[Any] = val[-dim:, :]
else:
__A : Union[str, Any] = val[:dim]
__A : Tuple = val[
dim : dim * 2
]
__A : str = val[-dim:]
else:
__A : Dict = rename_key(__snake_case )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
__A : List[Any] = val.T
__A : Any = val
return orig_state_dict
def _lowerCAmelCase ( __snake_case : Union[str, Any] ) -> Optional[int]:
if num_frames == 8:
__A : Any = 'eating_spaghetti_8_frames.npy'
elif num_frames == 16:
__A : List[str] = 'eating_spaghetti.npy'
elif num_frames == 32:
__A : int = 'eating_spaghetti_32_frames.npy'
__A : Any = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename=__snake_case , repo_type='dataset' , )
__A : Any = np.load(__snake_case )
return list(__snake_case )
def _lowerCAmelCase ( __snake_case : Union[str, Any] , __snake_case : Tuple=None , __snake_case : List[Any]=False ) -> Union[str, Any]:
__A : List[Any] = {
# fully supervised kinetics-400 checkpoints
'xclip-base-patch32': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth',
'xclip-base-patch32-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'
),
'xclip-base-patch16': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth',
'xclip-base-patch16-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'
),
'xclip-large-patch14': 'https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb',
'xclip-large-patch14-16-frames': 'https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f',
# fully supervised kinetics-600 checkpoints
'xclip-base-patch16-kinetics-600': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'
),
'xclip-base-patch16-kinetics-600-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'
),
'xclip-large-patch14-kinetics-600': 'https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be',
# few shot
'xclip-base-patch16-hmdb-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'
),
'xclip-base-patch16-hmdb-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'
),
'xclip-base-patch16-hmdb-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'
),
'xclip-base-patch16-hmdb-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'
),
'xclip-base-patch16-ucf-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'
),
'xclip-base-patch16-ucf-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'
),
'xclip-base-patch16-ucf-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'
),
'xclip-base-patch16-ucf-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'
),
# zero shot
'xclip-base-patch16-zero-shot': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth',
}
__A : List[Any] = model_to_url[model_name]
__A : List[Any] = 8
if "16-frames" in model_name:
__A : int = 16
elif "shot" in model_name:
__A : Optional[int] = 32
__A : List[str] = get_xclip_config(__snake_case , __snake_case )
__A : str = XCLIPModel(__snake_case )
model.eval()
if "drive" in checkpoint_url:
__A : List[str] = 'pytorch_model.bin'
gdown.cached_download(__snake_case , __snake_case , quiet=__snake_case )
__A : int = torch.load(__snake_case , map_location='cpu' )['model']
else:
__A : Optional[Any] = torch.hub.load_state_dict_from_url(__snake_case )['model']
__A : Dict = convert_state_dict(__snake_case , __snake_case )
__A : Union[str, Any] = XCLIPModel(__snake_case )
__A ,__A : List[str] = model.load_state_dict(__snake_case , strict=__snake_case )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
__A : List[str] = 3_36 if model_name == 'xclip-large-patch14-16-frames' else 2_24
__A : Optional[Any] = VideoMAEImageProcessor(size=__snake_case )
__A : Dict = CLIPTokenizer.from_pretrained('openai/clip-vit-base-patch32' )
__A : Optional[Any] = CLIPTokenizerFast.from_pretrained('openai/clip-vit-base-patch32' )
__A : str = XCLIPProcessor(image_processor=__snake_case , tokenizer=__snake_case )
__A : List[Any] = prepare_video(__snake_case )
__A : List[Any] = processor(
text=['playing sports', 'eating spaghetti', 'go shopping'] , videos=__snake_case , return_tensors='pt' , padding=__snake_case )
print('Shape of pixel values:' , inputs.pixel_values.shape )
with torch.no_grad():
__A : Optional[int] = model(**__snake_case )
# Verify outputs
__A : List[str] = outputs.logits_per_video
__A : Tuple = logits_per_video.softmax(dim=1 )
print('Probs:' , __snake_case )
# kinetics-400
if model_name == "xclip-base-patch32":
__A : List[Any] = torch.tensor([[0.0_019, 0.9_951, 0.0_030]] )
elif model_name == "xclip-base-patch32-16-frames":
__A : str = torch.tensor([[7.0_9_9_9e-0_4, 9.9_8_8_3e-0_1, 4.5_5_8_0e-0_4]] )
elif model_name == "xclip-base-patch16":
__A : List[Any] = torch.tensor([[0.0_083, 0.9_681, 0.0_236]] )
elif model_name == "xclip-base-patch16-16-frames":
__A : int = torch.tensor([[7.6_9_3_7e-0_4, 9.9_7_2_8e-0_1, 1.9_4_7_3e-0_3]] )
elif model_name == "xclip-large-patch14":
__A : Any = torch.tensor([[0.0_062, 0.9_864, 0.0_075]] )
elif model_name == "xclip-large-patch14-16-frames":
__A : Any = torch.tensor([[3.3_8_7_7e-0_4, 9.9_9_3_7e-0_1, 2.8_8_8_8e-0_4]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
__A : Optional[int] = torch.tensor([[0.0_555, 0.8_914, 0.0_531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
__A : int = torch.tensor([[3.8_5_5_4e-0_4, 9.9_9_2_9e-0_1, 3.2_7_5_4e-0_4]] )
elif model_name == "xclip-large-patch14-kinetics-600":
__A : List[Any] = torch.tensor([[0.0_036, 0.9_920, 0.0_045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
__A : Optional[int] = torch.tensor([[7.1_8_9_0e-0_6, 9.9_9_9_4e-0_1, 5.6_5_5_9e-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
__A : int = torch.tensor([[1.0_3_2_0e-0_5, 9.9_9_9_3e-0_1, 6.2_4_3_5e-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
__A : Dict = torch.tensor([[4.1_3_7_7e-0_6, 9.9_9_9_0e-0_1, 9.8_3_8_6e-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
__A : Optional[Any] = torch.tensor([[4.1_3_4_7e-0_5, 9.9_9_6_2e-0_1, 3.3_4_1_1e-0_4]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
__A : List[Any] = torch.tensor([[8.5_8_5_7e-0_5, 9.9_9_2_8e-0_1, 6.3_2_9_1e-0_4]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
__A : str = torch.tensor([[8.5_8_5_7e-0_5, 9.9_9_2_8e-0_1, 6.3_2_9_1e-0_4]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
__A : str = torch.tensor([[0.0_027, 0.9_904, 0.0_070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
__A : List[Any] = torch.tensor([[9.8_2_1_9e-0_4, 9.9_5_9_3e-0_1, 3.0_8_6_3e-0_3]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
__A : Optional[Any] = torch.tensor([[3.5_0_8_2e-0_4, 9.9_7_8_5e-0_1, 1.7_9_6_6e-0_3]] )
else:
raise ValueError(f'Model name {model_name} not supported' )
assert torch.allclose(__snake_case , __snake_case , atol=1e-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(__snake_case )
if push_to_hub:
print('Pushing model, processor and slow tokenizer files to the hub...' )
model.push_to_hub(__snake_case , organization='nielsr' )
processor.push_to_hub(__snake_case , organization='nielsr' )
slow_tokenizer.push_to_hub(__snake_case , organization='nielsr' )
if __name__ == "__main__":
lowercase__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''xclip-base-patch32''',
type=str,
help='''Name of the model.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowercase__ : Tuple = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 8 |
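
# The rename-then-load pattern used by rename_key/convert_state_dict above,
# reduced to a generic substring-remapping helper (the mapping entries below
# are a small illustrative subset, not the full X-CLIP table):
def remap_state_dict_keys(state_dict, substring_map):
    remapped = {}
    for key, value in state_dict.items():
        for old, new in substring_map.items():
            key = key.replace(old, new)
        remapped[key] = value
    return remapped


assert remap_state_dict_keys(
    {"ln_1.weight": 0, "c_fc.bias": 1},
    {"ln_1": "layer_norm1", "c_fc": "fc1"},
) == {"layer_norm1.weight": 0, "fc1.bias": 1}
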
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , ):
'''simple docstring'''
__A : Optional[int] = parent
__A : str = 13
__A : List[Any] = 7
__A : List[str] = True
__A : str = True
__A : Optional[Any] = True
__A : int = True
__A : Dict = 99
__A : Dict = 384
__A : Any = 2
__A : int = 4
__A : Optional[Any] = 37
__A : Optional[int] = 'gelu'
__A : Dict = 0.1
__A : Optional[int] = 0.1
__A : Any = 512
__A : int = 16
__A : List[str] = 2
__A : str = 0.02
__A : Any = 3
__A : str = 4
__A : Union[str, Any] = 128
__A : int = 2
__A : List[Any] = 9
__A : List[Any] = 1
__A : List[Any] = None
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__A : str = None
if self.use_input_mask:
__A : List[Any] = random_attention_mask([self.batch_size, self.seq_length])
__A : Optional[Any] = None
if self.use_token_type_ids:
__A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__A : Optional[int] = None
__A : List[str] = None
__A : Dict = None
if self.use_labels:
__A : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__A : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__A : str = ids_tensor([self.batch_size] , self.num_choices)
__A : List[Any] = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_UpperCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : int = TFConvBertModel(config=_UpperCAmelCase)
__A : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__A : Tuple = [input_ids, input_mask]
__A : Any = model(_UpperCAmelCase)
__A : Dict = model(_UpperCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : str = TFConvBertForMaskedLM(config=_UpperCAmelCase)
__A : str = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : str = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Optional[int] = self.num_labels
__A : Any = TFConvBertForSequenceClassification(config=_UpperCAmelCase)
__A : Optional[Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : Dict = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Tuple = self.num_choices
__A : List[str] = TFConvBertForMultipleChoice(config=_UpperCAmelCase)
__A : int = tf.tile(tf.expand_dims(_UpperCAmelCase , 1) , (1, self.num_choices, 1))
__A : Optional[Any] = tf.tile(tf.expand_dims(_UpperCAmelCase , 1) , (1, self.num_choices, 1))
__A : List[Any] = tf.tile(tf.expand_dims(_UpperCAmelCase , 1) , (1, self.num_choices, 1))
__A : int = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__A : Optional[Any] = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : List[Any] = self.num_labels
__A : List[Any] = TFConvBertForTokenClassification(config=_UpperCAmelCase)
__A : str = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : int = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Optional[Any] = TFConvBertForQuestionAnswering(config=_UpperCAmelCase)
__A : Any = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : Union[str, Any] = model(_UpperCAmelCase)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = self.prepare_config_and_inputs()
        __A ,__A ,__A ,__A ,__A ,__A ,__A : Union[str, Any] = config_and_inputs
__A : List[str] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE (a__ , a__ , unittest.TestCase ):
lowerCAmelCase = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCAmelCase = (
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = TFConvBertModelTester(self)
__A : str = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase)
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__A : List[str] = True
__A : List[str] = True
if hasattr(_UpperCAmelCase , 'use_cache'):
__A : List[Any] = True
__A : str = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length)
__A : Union[str, Any] = getattr(self.model_tester , 'key_length' , _UpperCAmelCase)
for model_class in self.all_model_classes:
__A : List[str] = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase)
__A : Optional[int] = model_class(_UpperCAmelCase)
__A : Optional[Any] = len(model(_UpperCAmelCase))
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_UpperCAmelCase , saved_model=_UpperCAmelCase)
__A : Union[str, Any] = os.path.join(_UpperCAmelCase , 'saved_model' , '1')
__A : Tuple = tf.keras.models.load_model(_UpperCAmelCase)
__A : str = model(_UpperCAmelCase)
if self.is_encoder_decoder:
__A : Optional[int] = outputs['encoder_hidden_states']
__A : str = outputs['encoder_attentions']
else:
__A : List[Any] = outputs['hidden_states']
__A : Optional[Any] = outputs['attentions']
self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase)
__A : str = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase)
self.assertListEqual(
list(output_hidden_states[0].shape[-2:]) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(output_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
self.assertIsNotNone(_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__A : Any = True
__A : str = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length)
__A : Any = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length)
__A : int = getattr(self.model_tester , 'key_length' , _UpperCAmelCase)
__A : Tuple = getattr(self.model_tester , 'key_length' , _UpperCAmelCase)
def check_decoder_attentions_output(_UpperCAmelCase):
__A : List[str] = len(_UpperCAmelCase)
self.assertEqual(out_len % 2 , 0)
__A : Any = outputs.decoder_attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(_UpperCAmelCase):
__A : str = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__A : Dict = True
__A : Any = False
__A : str = model_class(_UpperCAmelCase)
__A : List[str] = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : List[str] = len(_UpperCAmelCase)
self.assertEqual(config.output_hidden_states , _UpperCAmelCase)
check_encoder_attentions_output(_UpperCAmelCase)
if self.is_encoder_decoder:
__A : Union[str, Any] = model_class(_UpperCAmelCase)
__A : int = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
self.assertEqual(config.output_hidden_states , _UpperCAmelCase)
check_decoder_attentions_output(_UpperCAmelCase)
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__A : int = True
__A : Tuple = model_class(_UpperCAmelCase)
__A : Dict = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
self.assertEqual(config.output_hidden_states , _UpperCAmelCase)
check_encoder_attentions_output(_UpperCAmelCase)
# Check attention is always last and order is fine
__A : Any = True
__A : str = True
__A : Union[str, Any] = model_class(_UpperCAmelCase)
__A : Union[str, Any] = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_UpperCAmelCase))
self.assertEqual(model.config.output_hidden_states , _UpperCAmelCase)
check_encoder_attentions_output(_UpperCAmelCase)
@require_tf
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
__A : str = tf.constant([[0, 1, 2, 3, 4, 5]])
__A : Optional[int] = model(_UpperCAmelCase)[0]
__A : List[Any] = [1, 6, 768]
self.assertEqual(output.shape , _UpperCAmelCase)
__A : Tuple = tf.constant(
[
[
[-0.03475493, -0.4686034, -0.30638832],
[0.22637248, -0.26988646, -0.7423424],
[0.10324868, -0.45013508, -0.58280784],
]
])
tf.debugging.assert_near(output[:, :3, :3] , _UpperCAmelCase , atol=1e-4) | 8 | 1 |
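
# The integration check above as a standalone sketch; running it needs
# TensorFlow, `transformers`, and network access to fetch the checkpoint.
import tensorflow as tf
from transformers import TFConvBertModel

convbert = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
hidden_states = convbert(tf.constant([[0, 1, 2, 3, 4, 5]]))[0]
print(hidden_states.shape)  # (1, 6, 768)
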
'''simple docstring'''
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=16 , _UpperCAmelCase=[1, 2, 1] , _UpperCAmelCase=[2, 2, 4] , _UpperCAmelCase=2 , _UpperCAmelCase=2.0 , _UpperCAmelCase=True , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.1 , _UpperCAmelCase="gelu" , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-5 , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=10 , _UpperCAmelCase=8 , _UpperCAmelCase=["stage1", "stage2", "stage3"] , _UpperCAmelCase=[1, 2, 3] , ):
'''simple docstring'''
__A : Union[str, Any] = parent
__A : Dict = batch_size
__A : Any = image_size
__A : List[str] = patch_size
__A : List[str] = num_channels
__A : Any = embed_dim
__A : Dict = depths
__A : List[Any] = num_heads
__A : str = window_size
__A : Union[str, Any] = mlp_ratio
__A : str = qkv_bias
__A : Dict = hidden_dropout_prob
__A : Tuple = attention_probs_dropout_prob
__A : int = drop_path_rate
__A : str = hidden_act
__A : str = use_absolute_embeddings
__A : str = patch_norm
__A : Dict = layer_norm_eps
__A : List[str] = initializer_range
__A : str = is_training
__A : Union[str, Any] = scope
__A : int = use_labels
__A : Any = type_sequence_label_size
__A : List[str] = encoder_stride
__A : str = out_features
__A : int = out_indices
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
__A : Optional[int] = None
if self.use_labels:
__A : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__A : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Any = MaskFormerSwinModel(config=_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
__A : Any = model(_UpperCAmelCase)
__A : Optional[int] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
__A : List[str] = int(config.embed_dim * 2 ** (len(config.depths) - 1))
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Any = MaskFormerSwinBackbone(config=_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
__A : List[Any] = model(_UpperCAmelCase)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [13, 16, 16, 16])
# verify channels
self.parent.assertEqual(len(model.channels) , len(config.out_features))
self.parent.assertListEqual(model.channels , [16, 32, 64])
# verify ValueError
with self.parent.assertRaises(_UpperCAmelCase):
__A : Optional[int] = ['stem']
__A : Dict = MaskFormerSwinBackbone(config=_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = self.prepare_config_and_inputs()
__A ,__A ,__A : int = config_and_inputs
__A : Tuple = {'pixel_values': pixel_values}
return config, inputs_dict
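
# The hidden-state shape checks below rely on the Swin patch arithmetic
# num_patches = (H // p) * (W // p). A quick standalone check with this
# tester's defaults (32x32 images, 2x2 patches, embed_dim 16):
image_hw, patch_hw, embed_dim = (32, 32), (2, 2), 16
num_patches = (image_hw[1] // patch_hw[1]) * (image_hw[0] // patch_hw[0])
assert num_patches == 256  # first hidden state has shape (batch, 256, 16)
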
@require_torch
class SCREAMING_SNAKE_CASE (a__ , a__ , unittest.TestCase ):
lowerCAmelCase = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
lowerCAmelCase = {'''feature-extraction''': MaskFormerSwinModel} if is_torch_available() else {}
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = MaskFormerSwinModelTester(self)
__A : Union[str, Any] = ConfigTester(self , config_class=_UpperCAmelCase , embed_dim=37)
@require_torch_multi_gpu
@unittest.skip(
reason=(
'`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'
' `nn.DataParallel`'
))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_UpperCAmelCase)
@unittest.skip('Swin does not use inputs_embeds')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
@unittest.skip('Swin does not support feedforward chunking')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : str = model_class(_UpperCAmelCase)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
__A : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : str = model_class(_UpperCAmelCase)
__A : Union[str, Any] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A : str = [*signature.parameters.keys()]
__A : Tuple = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase)
@unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
@unittest.skip(reason='MaskFormerSwin is only used as an internal backbone')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : int = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : Optional[Any] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Optional[int] = outputs.hidden_states
__A : Union[str, Any] = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths) + 1)
self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase)
# Swin has a different seq_length
__A : int = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
__A : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : Any = self.model_tester.prepare_config_and_inputs_for_common()
__A : Tuple = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__A : List[Any] = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__A : List[str] = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : Any = self.model_tester.prepare_config_and_inputs_for_common()
__A : Optional[Any] = 3
__A : List[str] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
__A : Any = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
__A : Optional[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__A : Union[str, Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__A : Optional[int] = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , (padded_height, padded_width))
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__A : Optional[int] = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , (padded_height, padded_width))
@unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(_UpperCAmelCase):
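            # Intended effect: zero out NaN entries (t[t != t] = 0) so the
            # allclose comparison in recursive_check below is not poisoned by NaNs.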
__A : Any = 0
return t
def check_equivalence(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase={}):
with torch.no_grad():
__A : Dict = model(**_UpperCAmelCase , return_dict=_UpperCAmelCase , **_UpperCAmelCase)
__A : Union[str, Any] = model(**_UpperCAmelCase , return_dict=_UpperCAmelCase , **_UpperCAmelCase).to_tuple()
def recursive_check(_UpperCAmelCase , _UpperCAmelCase):
if isinstance(_UpperCAmelCase , (List, Tuple)):
for tuple_iterable_value, dict_iterable_value in zip(_UpperCAmelCase , _UpperCAmelCase):
recursive_check(_UpperCAmelCase , _UpperCAmelCase)
elif isinstance(_UpperCAmelCase , _UpperCAmelCase):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values()):
recursive_check(_UpperCAmelCase , _UpperCAmelCase)
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(_UpperCAmelCase) , set_nan_tensor_to_zero(_UpperCAmelCase) , atol=1e-5) , msg=(
'Tuple and dict output are not equal. Difference:'
F' {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:'
F' {torch.isnan(_UpperCAmelCase).any()} and `inf`: {torch.isinf(_UpperCAmelCase)}. Dict has'
F' `nan`: {torch.isnan(_UpperCAmelCase).any()} and `inf`: {torch.isinf(_UpperCAmelCase)}.'
) , )
recursive_check(_UpperCAmelCase , _UpperCAmelCase)
for model_class in self.all_model_classes:
__A : Tuple = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
__A : Tuple = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase)
__A : List[str] = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase)
check_equivalence(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
__A : int = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase)
__A : Dict = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase)
check_equivalence(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
__A : Any = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase)
__A : Any = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase)
check_equivalence(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , {'output_hidden_states': True})
__A : List[str] = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase)
__A : str = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase)
check_equivalence(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , {'output_hidden_states': True})
@require_torch
class SCREAMING_SNAKE_CASE (unittest.TestCase , a__ ):
lowerCAmelCase = (MaskFormerSwinBackbone,) if is_torch_available() else ()
lowerCAmelCase = MaskFormerSwinConfig
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = MaskFormerSwinModelTester(self)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : str = self.model_tester.prepare_config_and_inputs_for_common()
__A : Tuple = inputs_dict['pixel_values'].shape[0]
for backbone_class in self.all_model_classes:
__A : Optional[int] = backbone_class(_UpperCAmelCase)
backbone.to(_UpperCAmelCase)
backbone.eval()
__A : Tuple = backbone(**_UpperCAmelCase)
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , _UpperCAmelCase)
self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels))
self.assertIsNone(outputs.hidden_states)
self.assertIsNone(outputs.attentions)
# Test output_hidden_states=True
__A : Dict = backbone(**_UpperCAmelCase , output_hidden_states=_UpperCAmelCase)
self.assertIsNotNone(outputs.hidden_states)
self.assertTrue(len(outputs.hidden_states) , len(backbone.stage_names))
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
__A ,__A ,__A : List[Any] = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels))
# Test output_attentions=True
if self.has_attentions:
__A : Union[str, Any] = backbone(**_UpperCAmelCase , output_attentions=_UpperCAmelCase)
self.assertIsNotNone(outputs.attentions) | 8 |
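# A minimal, self-contained sketch of the tuple-vs-dict equivalence pattern the
# tests above rely on: run the same forward pass with return_dict=True and
# return_dict=False, zero out NaNs on both sides, and compare with torch.allclose.
# The ToyModel below is hypothetical and stands in for the real model classes.
import torch
from torch import nn

class ToyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(4, 4)

    def forward(self, x, return_dict=True):
        out = self.linear(x)
        return {"logits": out} if return_dict else (out,)

model = ToyModel().eval()
x = torch.randn(2, 4)
with torch.no_grad():
    dict_out = model(x, return_dict=True)["logits"]
    tuple_out = model(x, return_dict=False)[0]
# Zeroing NaNs mirrors set_nan_tensor_to_zero in the test above.
assert torch.allclose(torch.nan_to_num(tuple_out, nan=0.0), torch.nan_to_num(dict_out, nan=0.0), atol=1e-5)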
'''simple docstring'''
import argparse
import os
import re
lowercase__ : Optional[int] = '''src/diffusers'''
# Pattern that looks at the indentation in a line.
lowercase__ : Dict = re.compile(r'''^(\s*)\S''')
# Pattern that matches `"key":` and puts `key` in group 0.
lowercase__ : List[str] = re.compile(r'''^\s*"([^"]+)":''')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
lowercase__ : Tuple = re.compile(r'''^\s*_import_structure\["([^"]+)"\]''')
# Pattern that matches `"key",` and puts `key` in group 0.
lowercase__ : str = re.compile(r'''^\s*"([^"]+)",\s*$''')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
lowercase__ : str = re.compile(r'''\[([^\]]+)\]''')
def _lowerCAmelCase ( __snake_case : str ) -> Tuple:
__A : List[Any] = _re_indent.search(__snake_case )
return "" if search is None else search.groups()[0]
def _lowerCAmelCase ( __snake_case : Optional[Any] , __snake_case : str="" , __snake_case : Any=None , __snake_case : List[Any]=None ) -> Optional[int]:
__A : Tuple = 0
__A : Optional[int] = code.split('\n' )
if start_prompt is not None:
while not lines[index].startswith(__snake_case ):
index += 1
__A : Optional[int] = ['\n'.join(lines[:index] )]
else:
__A : Any = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
__A : Tuple = [lines[index]]
index += 1
while index < len(__snake_case ) and (end_prompt is None or not lines[index].startswith(__snake_case )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(__snake_case ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
current_block.append(lines[index] )
blocks.append('\n'.join(__snake_case ) )
if index < len(__snake_case ) - 1:
__A : Union[str, Any] = [lines[index + 1]]
index += 1
else:
__A : Union[str, Any] = []
else:
blocks.append('\n'.join(__snake_case ) )
__A : Optional[Any] = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(__snake_case ) > 0:
blocks.append('\n'.join(__snake_case ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(__snake_case ):
blocks.append('\n'.join(lines[index:] ) )
return blocks
def _lowerCAmelCase ( __snake_case : List[Any] ) -> int:
def _inner(__snake_case : List[Any] ):
return key(__snake_case ).lower().replace('_' , '' )
return _inner
def _lowerCAmelCase ( __snake_case : Dict , __snake_case : Any=None ) -> List[Any]:
# If no key is provided, we use a noop.
def noop(__snake_case : List[Any] ):
return x
if key is None:
__A : Optional[Any] = noop
# Constants are all uppercase, they go first.
__A : str = [obj for obj in objects if key(__snake_case ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
__A : List[str] = [obj for obj in objects if key(__snake_case )[0].isupper() and not key(__snake_case ).isupper()]
# Functions begin with a lowercase, they go last.
__A : str = [obj for obj in objects if not key(__snake_case )[0].isupper()]
__A : Tuple = ignore_underscore(__snake_case )
return sorted(__snake_case , key=__snake_case ) + sorted(__snake_case , key=__snake_case ) + sorted(__snake_case , key=__snake_case )
def _lowerCAmelCase ( __snake_case : Optional[int] ) -> Tuple:
# This inner function sort imports between [ ].
def _replace(__snake_case : Tuple ):
__A : List[str] = match.groups()[0]
if "," not in imports:
return f'[{imports}]'
__A : int = [part.strip().replace('"' , '' ) for part in imports.split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
__A : Dict = keys[:-1]
return "[" + ", ".join([f'"{k}"' for k in sort_objects(__snake_case )] ) + "]"
__A : List[Any] = import_statement.split('\n' )
if len(__snake_case ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
__A : Optional[int] = 2 if lines[1].strip() == '[' else 1
__A : Any = [(i, _re_strip_line.search(__snake_case ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
__A : Optional[int] = sort_objects(__snake_case , key=lambda __snake_case : x[1] )
__A : Any = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(__snake_case ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
__A : Union[str, Any] = _re_bracket_content.sub(_replace , lines[1] )
else:
__A : Dict = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
__A : Tuple = keys[:-1]
__A : List[Any] = get_indent(lines[1] ) + ', '.join([f'"{k}"' for k in sort_objects(__snake_case )] )
return "\n".join(__snake_case )
else:
# Finally we have to deal with imports fitting on one line
__A : Optional[Any] = _re_bracket_content.sub(_replace , __snake_case )
return import_statement
def _lowerCAmelCase ( __snake_case : List[Any] , __snake_case : List[Any]=True ) -> Optional[Any]:
with open(__snake_case , 'r' ) as f:
__A : Dict = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
__A : str = split_code_in_indented_blocks(
__snake_case , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(__snake_case ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
__A : Tuple = main_blocks[block_idx]
__A : int = block.split('\n' )
# Get to the start of the imports.
__A : Tuple = 0
while line_idx < len(__snake_case ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
__A : Optional[int] = len(__snake_case )
else:
line_idx += 1
if line_idx >= len(__snake_case ):
continue
# Ignore beginning and last line: they don't contain anything.
__A : Dict = '\n'.join(block_lines[line_idx:-1] )
__A : int = get_indent(block_lines[1] )
# Split the internal block into blocks of indent level 1.
__A : Optional[int] = split_code_in_indented_blocks(__snake_case , indent_level=__snake_case )
# We have two categories of import key: list or _import_structure[key].append/extend
__A : Any = _re_direct_key if '_import_structure' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
__A : Dict = [(pattern.search(__snake_case ).groups()[0] if pattern.search(__snake_case ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
__A : Optional[Any] = [(i, key) for i, key in enumerate(__snake_case ) if key is not None]
__A : Tuple = [x[0] for x in sorted(__snake_case , key=lambda __snake_case : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
__A : str = 0
__A : Any = []
for i in range(len(__snake_case ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
__A : str = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(__snake_case )
count += 1
# And we put our main block back together with its first and last line.
__A : int = '\n'.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(__snake_case ):
if check_only:
return True
else:
print(f'Overwriting {file}.' )
with open(__snake_case , 'w' ) as f:
f.write('\n'.join(__snake_case ) )
def _lowerCAmelCase ( __snake_case : int=True ) -> Optional[Any]:
__A : Tuple = []
for root, _, files in os.walk(__snake_case ):
if "__init__.py" in files:
__A : List[Any] = sort_imports(os.path.join(__snake_case , '__init__.py' ) , check_only=__snake_case )
if result:
__A : Dict = [os.path.join(__snake_case , '__init__.py' )]
if len(__snake_case ) > 0:
raise ValueError(f'Would overwrite {len(__snake_case )} files, run `make style`.' )
if __name__ == "__main__":
lowercase__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
lowercase__ : Union[str, Any] = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only) | 8 | 1 |
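# Hedged standalone demo of the ordering rule sort_objects implements above:
# ALL_CAPS constants first, Capitalized classes second, lowercase functions last,
# comparing keys with underscores stripped. The object names are made up.
def sort_objects_demo(objects):
    def key(obj):
        return obj.lower().replace("_", "")

    constants = [o for o in objects if o.isupper()]
    classes = [o for o in objects if o[0].isupper() and not o.isupper()]
    functions = [o for o in objects if not o[0].isupper()]
    return sorted(constants, key=key) + sorted(classes, key=key) + sorted(functions, key=key)

print(sort_objects_demo(["load_model", "UNet2DModel", "CONFIG_NAME", "AutoPipeline"]))
# -> ['CONFIG_NAME', 'AutoPipeline', 'UNet2DModel', 'load_model']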
'''simple docstring'''
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
lowercase__ : str = logging.getLogger(__name__)
def _lowerCAmelCase ( __snake_case : Tuple , __snake_case : Optional[int] ) -> Optional[int]:
__A : Tuple = np.argmax(__snake_case , axis=1 )
return np.sum(outputs == labels )
def _lowerCAmelCase ( __snake_case : Optional[int] ) -> int:
with open(__snake_case , encoding='utf_8' ) as f:
__A : Dict = csv.reader(__snake_case )
__A : Optional[Any] = []
next(__snake_case ) # skip the first line
for line in tqdm(__snake_case ):
output.append((' '.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
return output
def _lowerCAmelCase ( __snake_case : Tuple , __snake_case : List[Any] , __snake_case : Dict , __snake_case : Dict , __snake_case : List[str] , __snake_case : Union[str, Any] ) -> Any:
__A : List[str] = []
for dataset in encoded_datasets:
__A : Tuple = len(__snake_case )
__A : List[str] = np.zeros((n_batch, 2, input_len) , dtype=np.int64 )
__A : Dict = np.zeros((n_batch, 2) , dtype=np.int64 )
__A : Dict = np.full((n_batch, 2, input_len) , fill_value=-1_00 , dtype=np.int64 )
__A : Any = np.zeros((n_batch,) , dtype=np.int64 )
for (
i,
(story, conta, conta, mc_label),
) in enumerate(__snake_case ):
__A : Tuple = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
__A : Any = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
__A : int = with_conta
__A : str = with_conta
__A : Optional[int] = len(__snake_case ) - 1
__A : List[Any] = len(__snake_case ) - 1
__A : List[Any] = with_conta
__A : Tuple = with_conta
__A : Dict = mc_label
__A : List[str] = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(__snake_case ) for t in all_inputs ) )
return tensor_datasets
def _lowerCAmelCase ( ) -> Union[str, Any]:
__A : Tuple = argparse.ArgumentParser()
parser.add_argument('--model_name' , type=__snake_case , default='openai-gpt' , help='pretrained model name' )
parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
parser.add_argument('--do_eval' , action='store_true' , help='Whether to run eval on the dev set.' )
parser.add_argument(
'--output_dir' , default=__snake_case , type=__snake_case , required=__snake_case , help='The output directory where the model predictions and checkpoints will be written.' , )
parser.add_argument('--train_dataset' , type=__snake_case , default='' )
parser.add_argument('--eval_dataset' , type=__snake_case , default='' )
parser.add_argument('--seed' , type=__snake_case , default=42 )
parser.add_argument('--num_train_epochs' , type=__snake_case , default=3 )
parser.add_argument('--train_batch_size' , type=__snake_case , default=8 )
parser.add_argument('--eval_batch_size' , type=__snake_case , default=16 )
parser.add_argument('--adam_epsilon' , default=1e-8 , type=__snake_case , help='Epsilon for Adam optimizer.' )
parser.add_argument('--max_grad_norm' , type=__snake_case , default=1 )
parser.add_argument(
'--max_steps' , default=-1 , type=__snake_case , help=(
'If > 0: set total number of training steps to perform. Override num_train_epochs.'
) , )
parser.add_argument(
'--gradient_accumulation_steps' , type=__snake_case , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
parser.add_argument('--learning_rate' , type=__snake_case , default=6.2_5e-5 )
parser.add_argument('--warmup_steps' , default=0 , type=__snake_case , help='Linear warmup over warmup_steps.' )
parser.add_argument('--lr_schedule' , type=__snake_case , default='warmup_linear' )
parser.add_argument('--weight_decay' , type=__snake_case , default=0.01 )
parser.add_argument('--lm_coef' , type=__snake_case , default=0.9 )
parser.add_argument('--n_valid' , type=__snake_case , default=3_74 )
parser.add_argument('--server_ip' , type=__snake_case , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=__snake_case , default='' , help='Can be used for distant debugging.' )
__A : List[Any] = parser.parse_args()
print(__snake_case )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__snake_case )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
__A : str = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
__A : List[Any] = torch.cuda.device_count()
logger.info('device: {}, n_gpu {}'.format(__snake_case , __snake_case ) )
if not args.do_train and not args.do_eval:
raise ValueError('At least one of `do_train` or `do_eval` must be True.' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
__A : Optional[Any] = ['_start_', '_delimiter_', '_classify_']
__A : Tuple = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(__snake_case )
__A : int = tokenizer.convert_tokens_to_ids(__snake_case )
__A : Union[str, Any] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(__snake_case ) )
model.to(__snake_case )
# Load and encode the datasets
def tokenize_and_encode(__snake_case : Optional[Any] ):
if isinstance(__snake_case , __snake_case ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(__snake_case ) )
elif isinstance(__snake_case , __snake_case ):
return obj
return [tokenize_and_encode(__snake_case ) for o in obj]
logger.info('Encoding dataset...' )
__A : Tuple = load_rocstories_dataset(args.train_dataset )
__A : List[Any] = load_rocstories_dataset(args.eval_dataset )
__A : Any = (train_dataset, eval_dataset)
__A : List[Any] = tokenize_and_encode(__snake_case )
# Compute the max input length for the Transformer
__A : Dict = model.config.n_positions // 2 - 2
__A : Any = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
__A : Optional[Any] = min(__snake_case , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
__A : str = pre_process_datasets(__snake_case , __snake_case , __snake_case , *__snake_case )
__A ,__A : Optional[int] = tensor_datasets[0], tensor_datasets[1]
__A : str = TensorDataset(*__snake_case )
__A : Optional[int] = RandomSampler(__snake_case )
__A : Optional[Any] = DataLoader(__snake_case , sampler=__snake_case , batch_size=args.train_batch_size )
__A : Optional[Any] = TensorDataset(*__snake_case )
__A : str = SequentialSampler(__snake_case )
__A : Tuple = DataLoader(__snake_case , sampler=__snake_case , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
__A : Any = args.max_steps
__A : Optional[int] = args.max_steps // (len(__snake_case ) // args.gradient_accumulation_steps) + 1
else:
__A : Any = len(__snake_case ) // args.gradient_accumulation_steps * args.num_train_epochs
__A : Optional[Any] = list(model.named_parameters() )
__A : List[Any] = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
__A : str = [
{
'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
'weight_decay': args.weight_decay,
},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], 'weight_decay': 0.0},
]
__A : Optional[Any] = AdamW(__snake_case , lr=args.learning_rate , eps=args.adam_epsilon )
__A : List[str] = get_linear_schedule_with_warmup(
__snake_case , num_warmup_steps=args.warmup_steps , num_training_steps=__snake_case )
if args.do_train:
__A ,__A ,__A : Tuple = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc='Epoch' ):
__A : str = 0
__A : str = 0
__A : Tuple = tqdm(__snake_case , desc='Training' )
for step, batch in enumerate(__snake_case ):
__A : Tuple = tuple(t.to(__snake_case ) for t in batch )
__A ,__A ,__A ,__A : List[str] = batch
__A : str = model(__snake_case , mc_token_ids=__snake_case , lm_labels=__snake_case , mc_labels=__snake_case )
__A : Dict = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
__A : Tuple = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
__A : List[str] = 'Training loss: {:.2e} lr: {:.2e}'.format(__snake_case , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
__A : Dict = model.module if hasattr(__snake_case , 'module' ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
__A : int = os.path.join(args.output_dir , __snake_case )
__A : List[str] = os.path.join(args.output_dir , __snake_case )
torch.save(model_to_save.state_dict() , __snake_case )
model_to_save.config.to_json_file(__snake_case )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
__A : Union[str, Any] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
__A : List[str] = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(__snake_case )
if args.do_eval:
model.eval()
__A ,__A : Dict = 0, 0
__A ,__A : Any = 0, 0
for batch in tqdm(__snake_case , desc='Evaluating' ):
__A : int = tuple(t.to(__snake_case ) for t in batch )
__A ,__A ,__A ,__A : Any = batch
with torch.no_grad():
__A ,__A ,__A ,__A : Any = model(
__snake_case , mc_token_ids=__snake_case , lm_labels=__snake_case , mc_labels=__snake_case )
__A : Optional[Any] = mc_logits.detach().cpu().numpy()
__A : List[str] = mc_labels.to('cpu' ).numpy()
__A : Union[str, Any] = accuracy(__snake_case , __snake_case )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
__A : Optional[Any] = eval_loss / nb_eval_steps
__A : Any = eval_accuracy / nb_eval_examples
__A : Union[str, Any] = tr_loss / nb_tr_steps if args.do_train else None
__A : Any = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss}
__A : str = os.path.join(args.output_dir , 'eval_results.txt' )
with open(__snake_case , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key in sorted(result.keys() ):
logger.info(' %s = %s' , __snake_case , str(result[key] ) )
writer.write('%s = %s\n' % (key, str(result[key] )) )
if __name__ == "__main__":
main() | 8 |
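# Sketch of the no-decay parameter grouping used in the training script above:
# biases and LayerNorm weights get weight_decay=0.0, all other parameters get the
# configured decay. TinyBlock is a hypothetical module; only the grouping matters.
import torch
from torch import nn

class TinyBlock(nn.Module):
    def __init__(self):
        super().__init__()
        self.dense = nn.Linear(8, 8)
        self.LayerNorm = nn.LayerNorm(8)

model = TinyBlock()
no_decay = ["bias", "LayerNorm.weight"]  # name fragments, as in the script
grouped_parameters = [
    {
        "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
        "weight_decay": 0.01,
    },
    {
        "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
        "weight_decay": 0.0,
    },
]
optimizer = torch.optim.AdamW(grouped_parameters, lr=6.25e-5, eps=1e-8)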
'''simple docstring'''
def perfect ( number : int ) -> bool:
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print('''Program to check whether a number is a Perfect number or not...''')
lowercase__ : int = int(input('''Enter number: ''').strip())
print(f"""{number} is {"" if perfect(number) else "not "}a Perfect Number.""") | 8 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
lowercase__ : Any = {
'''google/tapas-base-finetuned-sqa''': (
'''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wtq''': (
'''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wikisql-supervised''': (
'''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-tabfact''': (
'''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = '''tapas'''
def __init__( self , _UpperCAmelCase=3_0522 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=1024 , _UpperCAmelCase=[3, 256, 256, 2, 256, 256, 10] , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=0 , _UpperCAmelCase=10.0 , _UpperCAmelCase=0 , _UpperCAmelCase=1.0 , _UpperCAmelCase=None , _UpperCAmelCase=1.0 , _UpperCAmelCase=False , _UpperCAmelCase=None , _UpperCAmelCase=1.0 , _UpperCAmelCase=1.0 , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase="ratio" , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=64 , _UpperCAmelCase=32 , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase , ):
'''simple docstring'''
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase)
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
__A : Dict = vocab_size
__A : Tuple = hidden_size
__A : Any = num_hidden_layers
__A : int = num_attention_heads
__A : Tuple = hidden_act
__A : Tuple = intermediate_size
__A : List[Any] = hidden_dropout_prob
__A : int = attention_probs_dropout_prob
__A : List[str] = max_position_embeddings
__A : Optional[int] = type_vocab_sizes
__A : str = initializer_range
__A : List[str] = layer_norm_eps
# Fine-tuning task hyperparameters
__A : List[str] = positive_label_weight
__A : List[Any] = num_aggregation_labels
__A : Optional[Any] = aggregation_loss_weight
__A : Tuple = use_answer_as_supervision
__A : List[str] = answer_loss_importance
__A : Any = use_normalized_answer_loss
__A : Any = huber_loss_delta
__A : Union[str, Any] = temperature
__A : Tuple = aggregation_temperature
__A : Optional[Any] = use_gumbel_for_cells
__A : List[str] = use_gumbel_for_aggregation
__A : Tuple = average_approximation_function
__A : List[str] = cell_selection_preference
__A : Dict = answer_loss_cutoff
__A : Union[str, Any] = max_num_rows
__A : Optional[Any] = max_num_columns
__A : int = average_logits_per_cell
__A : Optional[Any] = select_one_column
__A : int = allow_empty_column_selection
__A : List[Any] = init_cell_selection_weights_to_zero
__A : int = reset_position_index_per_cell
__A : Union[str, Any] = disable_per_token_loss
# Aggregation hyperparameters
__A : Optional[Any] = aggregation_labels
__A : List[str] = no_aggregation_label_index
if isinstance(self.aggregation_labels , _UpperCAmelCase):
__A : Optional[Any] = {int(_UpperCAmelCase): v for k, v in aggregation_labels.items()} | 8 |
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : Tuple = logging.get_logger(__name__)
lowercase__ : str = [
['''attention''', '''attn'''],
['''encoder_attention''', '''encoder_attn'''],
['''q_lin''', '''q_proj'''],
['''k_lin''', '''k_proj'''],
['''v_lin''', '''v_proj'''],
['''out_lin''', '''out_proj'''],
['''norm_embeddings''', '''layernorm_embedding'''],
['''position_embeddings''', '''embed_positions'''],
['''embeddings''', '''embed_tokens'''],
['''ffn.lin''', '''fc'''],
]
def _lowerCAmelCase ( __snake_case : List[Any] ) -> Tuple:
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
__A : Optional[Any] = k.replace(__snake_case , __snake_case )
if k.startswith('encoder' ):
__A : Any = k.replace('.attn' , '.self_attn' )
__A : Any = k.replace('norm1' , 'self_attn_layer_norm' )
__A : str = k.replace('norm2' , 'final_layer_norm' )
elif k.startswith('decoder' ):
__A : Tuple = k.replace('norm1' , 'self_attn_layer_norm' )
__A : str = k.replace('norm2' , 'encoder_attn_layer_norm' )
__A : int = k.replace('norm3' , 'final_layer_norm' )
return k
def _lowerCAmelCase ( __snake_case : List[Any] ) -> Dict:
__A : Optional[int] = [
'model.encoder.layernorm_embedding.weight',
'model.encoder.layernorm_embedding.bias',
'model.decoder.layernorm_embedding.weight',
'model.decoder.layernorm_embedding.bias',
]
for k in keys:
__A : Tuple = sd.pop(__snake_case )
__A : Union[str, Any] = k.replace('layernorm_embedding' , 'layer_norm' )
assert new_k not in sd
__A : str = v
lowercase__ : Tuple = ['''START''']
@torch.no_grad()
def _lowerCAmelCase ( __snake_case : Dict , __snake_case : Any , __snake_case : List[Any] ) -> int:
__A : List[str] = torch.load(__snake_case , map_location='cpu' )
__A : Tuple = model['model']
__A : str = BlenderbotConfig.from_json_file(__snake_case )
__A : int = BlenderbotForConditionalGeneration(__snake_case )
__A : List[Any] = m.model.state_dict().keys()
__A : Optional[int] = []
__A : Optional[int] = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
__A : Union[str, Any] = rename_state_dict_key(__snake_case )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
__A : Optional[Any] = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(__snake_case )
m.model.load_state_dict(__snake_case , strict=__snake_case )
m.half()
m.save_pretrained(__snake_case )
if __name__ == "__main__":
lowercase__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--src_path''', type=str, help='''like blenderbot-model.bin''')
parser.add_argument('''--save_dir''', default='''hf_blenderbot''', type=str, help='''Where to save converted model.''')
parser.add_argument(
'''--hf_config_json''', default='''blenderbot-3b-config.json''', type=str, help='''Path to config to use'''
)
lowercase__ : Optional[Any] = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json) | 8 | 1 |
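# Hedged illustration of the PATTERNS-based renaming above: each (parlai, hf)
# pair is applied as a plain substring replacement, then encoder attention and
# norm layers are renamed. The state-dict key below is made up for demonstration.
patterns_demo = [["attention", "attn"], ["q_lin", "q_proj"]]
key = "encoder.attention.q_lin.weight"
for parlai_name, hf_name in patterns_demo:
    key = key.replace(parlai_name, hf_name)
if key.startswith("encoder"):
    key = key.replace(".attn", ".self_attn")
print(key)  # -> encoder.self_attn.q_proj.weight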
'''simple docstring'''
from __future__ import annotations
lowercase__ : Union[str, Any] = 8.988e9 # units = N * m^2 * C^-2
def _lowerCAmelCase ( __snake_case : float , __snake_case : float , __snake_case : float , __snake_case : float ) -> dict[str, float]:
__A : List[Any] = abs(chargea * chargea )
if (force, chargea, chargea, distance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if distance < 0:
raise ValueError('Distance cannot be negative' )
if force == 0:
__A : Tuple = COULOMBS_CONSTANT * charge_product / (distance**2)
return {"force": force}
elif chargea == 0:
__A : str = abs(__snake_case ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge1": chargea}
elif chargea == 0:
__A : Any = abs(__snake_case ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge2": chargea}
elif distance == 0:
__A : str = (COULOMBS_CONSTANT * charge_product / abs(__snake_case )) ** 0.5
return {"distance": distance}
raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 |
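# Worked example for the solver above: two 1 C charges held 1 m apart. With
# force set to 0 as the unknown, F = k * |q1 * q2| / d**2 = 8.988e9 N.
k = 8.988e9
force = k * abs(1.0 * 1.0) / (1.0**2)
print(force)  # 8988000000.0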
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None):
'''simple docstring'''
__A : List[Any] = list(poly_a or [0])[:]
__A : Optional[int] = list(poly_b or [0])[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
__A : Union[str, Any] = len(self.polyA)
while self.polyB[-1] == 0:
self.polyB.pop()
__A : Optional[int] = len(self.polyB)
# Add 0 to make lengths equal a power of 2
__A : Optional[Any] = int(
2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))
while len(self.polyA) < self.c_max_length:
self.polyA.append(0)
while len(self.polyB) < self.c_max_length:
self.polyB.append(0)
# A complex root used for the fourier transform
__A : str = complex(mpmath.root(x=1 , n=self.c_max_length , k=1))
# The product
__A : Tuple = self.__multiply()
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A : Optional[int] = [[x] for x in self.polyA] if which == 'A' else [[x] for x in self.polyB]
# Corner case
if len(_UpperCAmelCase) <= 1:
return dft[0]
#
__A : Dict = self.c_max_length // 2
while next_ncol > 0:
__A : Optional[Any] = [[] for i in range(_UpperCAmelCase)]
__A : Tuple = self.root**next_ncol
# First half of next step
__A : Optional[Any] = 1
for j in range(self.c_max_length // (next_ncol * 2)):
for i in range(_UpperCAmelCase):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
current_root *= root
# Second half of next step
__A : List[str] = 1
for j in range(self.c_max_length // (next_ncol * 2)):
for i in range(_UpperCAmelCase):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
current_root *= root
# Update
__A : Optional[int] = new_dft
__A : Tuple = next_ncol // 2
return dft[0]
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = self.__dft('A')
__A : Optional[Any] = self.__dft('B')
__A : str = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0]) <= 1:
return inverce_c[0]
# Inverse DFT
__A : Dict = 2
while next_ncol <= self.c_max_length:
__A : Optional[int] = [[] for i in range(_UpperCAmelCase)]
__A : Any = self.root ** (next_ncol // 2)
__A : Tuple = 1
# First half of next step
for j in range(self.c_max_length // next_ncol):
for i in range(next_ncol // 2):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2)
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root))
current_root *= root
# Update
__A : int = new_inverse_c
next_ncol *= 2
# Unpack
__A : Optional[int] = [round(x[0].real , 8) + round(x[0].imag , 8) * 1j for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self):
'''simple docstring'''
__A : int = 'A = ' + ' + '.join(
F'{coef}*x^{i}' for i, coef in enumerate(self.polyA[: self.len_A]))
__A : Optional[Any] = 'B = ' + ' + '.join(
F'{coef}*x^{i}' for i, coef in enumerate(self.polyB[: self.len_B]))
__A : str = 'A*B = ' + ' + '.join(
F'{coef}*x^{i}' for i, coef in enumerate(self.product))
return F'{a}\n{b}\n{c}'
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 | 1 |
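# Independent cross-check of the FFT-based product above using numpy:
# (1 + 2x) * (3 + 4x) = 3 + 10x + 8x^2, and np.convolve computes the same
# coefficient convolution that the DFT/inverse-DFT round trip produces.
import numpy as np

poly_a = [1, 2]  # 1 + 2x
poly_b = [3, 4]  # 3 + 4x
print(np.convolve(poly_a, poly_b))  # -> [ 3 10  8]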
'''simple docstring'''
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
'''The RoBERTa Model transformer with early exiting (DeeRoBERTa). ''' , a__ , )
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = RobertaConfig
lowerCAmelCase = '''roberta'''
def __init__( self , _UpperCAmelCase):
'''simple docstring'''
super().__init__(_UpperCAmelCase)
__A : Tuple = RobertaEmbeddings(_UpperCAmelCase)
self.init_weights()
@add_start_docstrings(
'''RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
also takes care of multi-layer training. ''' , a__ , )
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = RobertaConfig
lowerCAmelCase = '''roberta'''
def __init__( self , _UpperCAmelCase):
'''simple docstring'''
super().__init__(_UpperCAmelCase)
__A : List[str] = config.num_labels
__A : List[str] = config.num_hidden_layers
__A : Any = DeeRobertaModel(_UpperCAmelCase)
__A : Tuple = nn.Dropout(config.hidden_dropout_prob)
__A : Optional[Any] = nn.Linear(config.hidden_size , self.config.num_labels)
@add_start_docstrings_to_model_forward(_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=-1 , _UpperCAmelCase=False , ):
'''simple docstring'''
__A : Optional[Any] = self.num_layers
try:
__A : Dict = self.roberta(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , head_mask=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase , )
__A : List[Any] = outputs[1]
__A : List[str] = self.dropout(_UpperCAmelCase)
__A : List[str] = self.classifier(_UpperCAmelCase)
__A : List[Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
__A : Optional[int] = e.message
__A : Optional[int] = e.exit_layer
__A : Tuple = outputs[0]
if not self.training:
__A : List[str] = entropy(_UpperCAmelCase)
__A : List[str] = []
__A : Any = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
__A : Any = MSELoss()
__A : Any = loss_fct(logits.view(-1) , labels.view(-1))
else:
__A : Optional[int] = CrossEntropyLoss()
__A : Union[str, Any] = loss_fct(logits.view(-1 , self.num_labels) , labels.view(-1))
# work with highway exits
__A : Optional[int] = []
for highway_exit in outputs[-1]:
__A : str = highway_exit[0]
if not self.training:
highway_logits_all.append(_UpperCAmelCase)
highway_entropy.append(highway_exit[2])
if self.num_labels == 1:
# We are doing regression
__A : int = MSELoss()
__A : Union[str, Any] = loss_fct(highway_logits.view(-1) , labels.view(-1))
else:
__A : Union[str, Any] = CrossEntropyLoss()
__A : int = loss_fct(highway_logits.view(-1 , self.num_labels) , labels.view(-1))
highway_losses.append(_UpperCAmelCase)
if train_highway:
__A : Optional[int] = (sum(highway_losses[:-1]),) + outputs
# exclude the final highway, of course
else:
__A : str = (loss,) + outputs
if not self.training:
__A : Optional[Any] = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
__A : str = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy | 8 |
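# Minimal sketch of the entropy-based early exit the highway layers rely on: if
# the softmax entropy of an intermediate classifier's logits drops below a
# threshold, inference can stop at that layer. Standalone helper; the threshold
# value here is purely illustrative.
import torch

def softmax_entropy(logits: torch.Tensor) -> torch.Tensor:
    probs = torch.softmax(logits, dim=-1)
    return -(probs * torch.log(probs + 1e-12)).sum(dim=-1)

logits = torch.tensor([[4.0, 0.1, 0.1], [1.0, 1.0, 1.0]])
print(softmax_entropy(logits) < 0.5)  # tensor([ True, False]): only the confident row exits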
'''simple docstring'''
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=[30, 30] , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=10 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=None , _UpperCAmelCase=8 , _UpperCAmelCase=10 , ):
'''simple docstring'''
__A : Union[str, Any] = parent
__A : Tuple = batch_size
__A : List[str] = image_size
__A : Dict = patch_size
__A : Optional[Any] = num_channels
__A : Tuple = is_training
__A : Dict = use_labels
__A : List[Any] = hidden_size
__A : Tuple = num_hidden_layers
__A : int = num_attention_heads
__A : Optional[int] = intermediate_size
__A : Tuple = hidden_act
__A : Any = hidden_dropout_prob
__A : Optional[Any] = attention_probs_dropout_prob
__A : List[Any] = type_sequence_label_size
__A : List[Any] = initializer_range
__A : Optional[int] = num_labels
__A : List[Any] = scope
__A : Any = n_targets
__A : Union[str, Any] = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
__A : List[str] = (image_size[1] // patch_size) * (image_size[0] // patch_size)
__A : int = num_patches + 1 + self.num_detection_tokens
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])
__A : Tuple = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
__A : List[Any] = []
for i in range(self.batch_size):
__A : Optional[int] = {}
__A : Union[str, Any] = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=_UpperCAmelCase)
__A : str = torch.rand(self.n_targets , 4 , device=_UpperCAmelCase)
labels.append(_UpperCAmelCase)
__A : Any = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Any = YolosModel(config=_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
__A : Dict = model(_UpperCAmelCase)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Any = YolosForObjectDetection(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
__A : str = model(pixel_values=_UpperCAmelCase)
__A : List[str] = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4))
__A : Union[str, Any] = model(pixel_values=_UpperCAmelCase , labels=_UpperCAmelCase)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = self.prepare_config_and_inputs()
__A ,__A ,__A : Tuple = config_and_inputs
__A : Tuple = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE (a__ , a__ , unittest.TestCase ):
lowerCAmelCase = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
lowerCAmelCase = (
{'''feature-extraction''': YolosModel, '''object-detection''': YolosForObjectDetection} if is_torch_available() else {}
)
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False):
'''simple docstring'''
__A : Optional[Any] = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase)
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
__A : Any = []
for i in range(self.model_tester.batch_size):
__A : Tuple = {}
__A : Tuple = torch.ones(
size=(self.model_tester.n_targets,) , device=_UpperCAmelCase , dtype=torch.long)
__A : Optional[Any] = torch.ones(
self.model_tester.n_targets , 4 , device=_UpperCAmelCase , dtype=torch.float)
labels.append(_UpperCAmelCase)
__A : str = labels
return inputs_dict
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = YolosModelTester(self)
__A : Dict = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : Tuple = model_class(_UpperCAmelCase)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
__A : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : List[Any] = model_class(_UpperCAmelCase)
__A : str = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A : int = [*signature.parameters.keys()]
__A : List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__A : Optional[int] = True
# in YOLOS, the seq_len is different
__A : Dict = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
__A : Dict = True
__A : Dict = False
__A : Union[str, Any] = True
__A : Tuple = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : Any = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Union[str, Any] = outputs.attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__A : List[Any] = True
__A : List[str] = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : List[Any] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Optional[Any] = outputs.attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
__A : str = len(_UpperCAmelCase)
# Check attention is always last and order is fine
__A : Dict = True
__A : Dict = True
__A : Dict = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : List[str] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Union[str, Any] = 1
self.assertEqual(out_len + added_hidden_states , len(_UpperCAmelCase))
__A : Optional[Any] = outputs.attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
__A : Tuple = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : List[str] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Optional[Any] = outputs.hidden_states
__A : List[str] = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase)
# YOLOS has a different seq_length
__A : Dict = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )
__A ,__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : List[str] = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__A : Optional[int] = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*_UpperCAmelCase)
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A : List[Any] = YolosModel.from_pretrained(_UpperCAmelCase)
self.assertIsNotNone(_UpperCAmelCase)
def _lowerCAmelCase ( ) -> int:
__A : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('hustvl/yolos-small') if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = YolosForObjectDetection.from_pretrained('hustvl/yolos-small').to(_UpperCAmelCase)
__A : Any = self.default_image_processor
__A : str = prepare_img()
__A : int = image_processor(images=_UpperCAmelCase , return_tensors='pt').to(_UpperCAmelCase)
# forward pass
with torch.no_grad():
__A : str = model(inputs.pixel_values)
# verify outputs
__A : Tuple = torch.Size((1, 100, 92))
self.assertEqual(outputs.logits.shape , _UpperCAmelCase)
__A : Dict = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] , device=_UpperCAmelCase , )
__A : int = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] , device=_UpperCAmelCase)
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , _UpperCAmelCase , atol=1e-4))
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , _UpperCAmelCase , atol=1e-4))
# verify postprocessing
__A : List[str] = image_processor.post_process_object_detection(
_UpperCAmelCase , threshold=0.3 , target_sizes=[image.size[::-1]])[0]
__A : Optional[int] = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(_UpperCAmelCase)
__A : Union[str, Any] = [75, 75, 17, 63, 17]
__A : Any = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(_UpperCAmelCase)
self.assertEqual(len(results['scores']) , 5)
self.assertTrue(torch.allclose(results['scores'] , _UpperCAmelCase , atol=1e-4))
self.assertSequenceEqual(results['labels'].tolist() , _UpperCAmelCase)
self.assertTrue(torch.allclose(results['boxes'][0, :] , _UpperCAmelCase)) | 8 | 1 |
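# Sanity arithmetic for the expected sequence length used throughout the tests
# above: num_patches + 1 (the [CLS] token) + num_detection_tokens. With the
# tester defaults of a 30x30 image, patch size 2, and 10 detection tokens:
image_size, patch_size, num_detection_tokens = (30, 30), 2, 10
num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
print(num_patches + 1 + num_detection_tokens)  # 15 * 15 + 1 + 10 = 236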
'''simple docstring'''
from __future__ import annotations
import pandas as pd
def _lowerCAmelCase ( __snake_case : list[int] , __snake_case : list[int] , __snake_case : int ) -> list[int]:
__A : Tuple = [0] * no_of_processes
__A : str = [0] * no_of_processes
# Copy the burst time into remaining_time[]
for i in range(__snake_case ):
__A : int = burst_time[i]
__A : str = 0
__A : Tuple = 0
__A : Optional[Any] = 9_99_99_99_99
__A : int = 0
__A : Optional[Any] = False
# Process until all processes are completed
while complete != no_of_processes:
for j in range(__snake_case ):
if arrival_time[j] <= increment_time and remaining_time[j] > 0:
if remaining_time[j] < minm:
__A : Dict = remaining_time[j]
__A : Optional[int] = j
__A : Union[str, Any] = True
if not check:
increment_time += 1
continue
remaining_time[short] -= 1
__A : str = remaining_time[short]
if minm == 0:
__A : Any = 9_99_99_99_99
if remaining_time[short] == 0:
complete += 1
__A : List[Any] = False
# Find finish time of current process
__A : Any = increment_time + 1
# Calculate waiting time
finar = finish_time - arrival_time[short]
waiting_time[short] = finar - burst_time[short]
if waiting_time[short] < 0:
__A : List[str] = 0
# Increment time
increment_time += 1
return waiting_time
def _lowerCAmelCase ( __snake_case : list[int] , __snake_case : int , __snake_case : list[int] ) -> list[int]:
__A : Optional[Any] = [0] * no_of_processes
for i in range(__snake_case ):
__A : Any = burst_time[i] + waiting_time[i]
return turn_around_time
def _lowerCAmelCase ( __snake_case : list[int] , __snake_case : list[int] , __snake_case : int ) -> None:
__A : Union[str, Any] = 0
__A : Optional[int] = 0
for i in range(__snake_case ):
__A : Optional[Any] = total_waiting_time + waiting_time[i]
__A : List[str] = total_turn_around_time + turn_around_time[i]
print(f'Average waiting time = {total_waiting_time / no_of_processes:.5f}' )
print('Average turn around time =' , total_turn_around_time / no_of_processes )
if __name__ == "__main__":
print('''Enter how many process you want to analyze''')
lowercase__ : Dict = int(input())
lowercase__ : str = [0] * no_of_processes
lowercase__ : int = [0] * no_of_processes
lowercase__ : Tuple = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
print('''Enter the arrival time and burst time for process:--''' + str(i + 1))
lowercase__ , lowercase__ : Dict = map(int, input().split())
lowercase__ : int = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
lowercase__ : Any = burst_time
lowercase__ : Tuple = no_of_processes
lowercase__ : List[str] = waiting_time
lowercase__ : List[Any] = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
lowercase__ : str = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
'''Process''',
'''BurstTime''',
'''ArrivalTime''',
'''WaitingTime''',
'''TurnAroundTime''',
],
)
# Printing the dataFrame
pd.set_option('''display.max_rows''', fcfs.shape[0] + 1)
print(fcfs) | 8 |
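# Hand trace of the shortest-remaining-time-first policy implemented above, using
# the same waiting-time definition (finish - arrival - burst). Two processes:
# P1 arrives at t=0 with burst 4, P2 arrives at t=1 with burst 2.
#   t=0..1: P1 runs (remaining 3). At t=1, P2's remaining 2 < 3, so P2 preempts.
#   t=1..3: P2 runs to completion -> waiting(P2) = 3 - 1 - 2 = 0.
#   t=3..6: P1 resumes and finishes -> waiting(P1) = 6 - 0 - 4 = 2.
# Average waiting time = (2 + 0) / 2 = 1.0.
arrival, burst, finish = [0, 1], [4, 2], [6, 3]
waiting = [f - a - b for f, a, b in zip(finish, arrival, burst)]
print(waiting, sum(waiting) / len(waiting))  # -> [2, 0] 1.0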
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
lowercase__ : Optional[int] = None
lowercase__ : List[str] = logging.get_logger(__name__)
lowercase__ : Optional[Any] = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowercase__ : List[str] = {
'''vocab_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/tokenizer.json''',
},
}
lowercase__ : Dict = {
'''camembert-base''': 5_12,
}
lowercase__ : str = '''▁'''
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = ['''input_ids''', '''attention_mask''']
lowerCAmelCase = CamembertTokenizer
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<s>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="<mask>" , _UpperCAmelCase=["<s>NOTUSED", "</s>NOTUSED"] , **_UpperCAmelCase , ):
'''simple docstring'''
__A : int = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase) if isinstance(_UpperCAmelCase , _UpperCAmelCase) else mask_token
super().__init__(
_UpperCAmelCase , tokenizer_file=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , **_UpperCAmelCase , )
__A : List[str] = vocab_file
__A : Optional[int] = False if not self.vocab_file else True
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__A : Optional[Any] = [self.cls_token_id]
__A : Optional[int] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
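        # i.e. a sentence pair is encoded as `<s> A </s></s> B </s>`, the
        # RoBERTa-style special-token layout.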
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None):
'''simple docstring'''
__A : Optional[int] = [self.sep_token_id]
__A : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.')
if not os.path.isdir(_UpperCAmelCase):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
__A : List[Any] = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(_UpperCAmelCase):
copyfile(self.vocab_file , _UpperCAmelCase)
return (out_vocab_file,) | 8 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase__ : List[Any] = {
'''configuration_pegasus_x''': ['''PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PegasusXConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Optional[Any] = [
'''PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PegasusXForConditionalGeneration''',
'''PegasusXModel''',
'''PegasusXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
lowercase__ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 8 |
'''simple docstring'''
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
lowercase__ : Any = '''hf-internal-testing/tiny-random-bert'''
lowercase__ : Optional[Any] = os.path.join(TRANSFORMERS_CACHE, '''models--hf-internal-testing--tiny-random-bert''')
lowercase__ : List[Any] = '''9b8c223d42b2188cb49d29af482996f9d0f3e5a6'''
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase)
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(_UpperCAmelCase))
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(_UpperCAmelCase , _UpperCAmelCase)))
with open(os.path.join(_UpperCAmelCase , 'refs' , 'main')) as f:
__A : Any = f.read()
self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , 'snapshots' , _UpperCAmelCase , _UpperCAmelCase))
self.assertTrue(os.path.isfile(_UpperCAmelCase))
# File is cached at the same place the second time.
__A : Tuple = cached_file(_UpperCAmelCase , _UpperCAmelCase)
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase)
# Using a specific revision to test the full commit hash.
__A : List[Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='9b8c223')
self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , 'snapshots' , _UpperCAmelCase , _UpperCAmelCase))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid model identifier'):
__A : Dict = cached_file('tiny-random-bert' , _UpperCAmelCase)
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid git identifier'):
__A : Optional[int] = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='aaaa')
with self.assertRaisesRegex(_UpperCAmelCase , 'does not appear to have a file named'):
__A : int = cached_file(_UpperCAmelCase , 'conf')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with self.assertRaisesRegex(_UpperCAmelCase , 'does not appear to have a file named'):
__A : Any = cached_file(_UpperCAmelCase , 'conf')
with open(os.path.join(_UpperCAmelCase , 'refs' , 'main')) as f:
__A : Dict = f.read()
self.assertTrue(os.path.isfile(os.path.join(_UpperCAmelCase , '.no_exist' , _UpperCAmelCase , 'conf')))
__A : List[Any] = cached_file(_UpperCAmelCase , 'conf' , _raise_exceptions_for_missing_entries=_UpperCAmelCase)
self.assertIsNone(_UpperCAmelCase)
__A : str = cached_file(_UpperCAmelCase , 'conf' , local_files_only=_UpperCAmelCase , _raise_exceptions_for_missing_entries=_UpperCAmelCase)
self.assertIsNone(_UpperCAmelCase)
__A : List[str] = mock.Mock()
__A : Dict = 500
__A : List[str] = {}
__A : List[Any] = HTTPError
__A : Optional[Any] = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=_UpperCAmelCase) as mock_head:
__A : Dict = cached_file(_UpperCAmelCase , 'conf' , _raise_exceptions_for_connection_errors=_UpperCAmelCase)
self.assertIsNone(_UpperCAmelCase)
            # This checks that we did call the fake head request
mock_head.assert_called()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.assertTrue(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase))
self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase))
self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.assertIsNone(get_file_from_repo('bert-base-cased' , 'ahah.txt'))
# The function raises if the repository does not exist.
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid model identifier'):
get_file_from_repo('bert-base-case' , _UpperCAmelCase)
# The function raises if the revision does not exist.
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid git identifier'):
get_file_from_repo('bert-base-cased' , _UpperCAmelCase , revision='ahaha')
__A : List[str] = get_file_from_repo('bert-base-cased' , _UpperCAmelCase)
# The name is the cached name which is not very easy to test, so instead we load the content.
__A : List[str] = json.loads(open(_UpperCAmelCase , 'r').read())
self.assertEqual(config['hidden_size'] , 768)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
__A : Tuple = Path(_UpperCAmelCase) / 'a.txt'
filename.touch()
self.assertEqual(get_file_from_repo(_UpperCAmelCase , 'a.txt') , str(_UpperCAmelCase))
self.assertIsNone(get_file_from_repo(_UpperCAmelCase , 'b.txt')) | 8 | 1 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowercase__ : int = logging.get_logger(__name__)
lowercase__ : Optional[int] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
lowercase__ : Optional[int] = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
lowercase__ : Optional[int] = {
'''allenai/longformer-base-4096''': 40_96,
'''allenai/longformer-large-4096''': 40_96,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 40_96,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 40_96,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 40_96,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def _lowerCAmelCase ( ) -> Union[str, Any]:
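    # GPT-2-style byte encoder: the 188 printable latin-1 bytes map to themselves,
    # and the remaining bytes are shifted to unused code points starting at 256, so
    # every byte sequence becomes a string of printable characters.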
__A : Union[str, Any] = (
list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
)
__A : int = bs[:]
__A : Optional[Any] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(__snake_case )
cs.append(2**8 + n )
n += 1
__A : Union[str, Any] = [chr(__snake_case ) for n in cs]
return dict(zip(__snake_case , __snake_case ) )
def _lowerCAmelCase ( __snake_case : Optional[Any] ) -> Tuple:
__A : List[str] = set()
__A : Dict = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__A : List[Any] = char
return pairs
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = ['''input_ids''', '''attention_mask''']
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase="replace" , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<s>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="<mask>" , _UpperCAmelCase=False , **_UpperCAmelCase , ):
'''simple docstring'''
__A : int = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase) if isinstance(_UpperCAmelCase , _UpperCAmelCase) else bos_token
__A : Dict = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase) if isinstance(_UpperCAmelCase , _UpperCAmelCase) else eos_token
__A : List[Any] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase) if isinstance(_UpperCAmelCase , _UpperCAmelCase) else sep_token
__A : str = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase) if isinstance(_UpperCAmelCase , _UpperCAmelCase) else cls_token
__A : List[Any] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase) if isinstance(_UpperCAmelCase , _UpperCAmelCase) else unk_token
__A : Optional[int] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase) if isinstance(_UpperCAmelCase , _UpperCAmelCase) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__A : Union[str, Any] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase) if isinstance(_UpperCAmelCase , _UpperCAmelCase) else mask_token
super().__init__(
errors=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , **_UpperCAmelCase , )
with open(_UpperCAmelCase , encoding='utf-8') as vocab_handle:
__A : List[str] = json.load(_UpperCAmelCase)
__A : int = {v: k for k, v in self.encoder.items()}
__A : Dict = errors # how to handle errors in decoding
__A : List[Any] = bytes_to_unicode()
__A : List[str] = {v: k for k, v in self.byte_encoder.items()}
with open(_UpperCAmelCase , encoding='utf-8') as merges_handle:
__A : Dict = merges_handle.read().split('\n')[1:-1]
__A : Optional[Any] = [tuple(merge.split()) for merge in bpe_merges]
__A : List[Any] = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase))))
__A : Any = {}
__A : Dict = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__A : List[Any] = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+')
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return len(self.encoder)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
__A : List[Any] = tuple(_UpperCAmelCase)
__A : Optional[Any] = get_pairs(_UpperCAmelCase)
if not pairs:
return token
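        # repeatedly merge the adjacent symbol pair with the best (lowest) rank in
        # the learned merge table until no known merge remains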
while True:
__A : Union[str, Any] = min(_UpperCAmelCase , key=lambda _UpperCAmelCase: self.bpe_ranks.get(_UpperCAmelCase , float('inf')))
if bigram not in self.bpe_ranks:
break
__A ,__A : int = bigram
__A : List[str] = []
__A : str = 0
while i < len(_UpperCAmelCase):
try:
__A : Any = word.index(_UpperCAmelCase , _UpperCAmelCase)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
__A : Tuple = j
if word[i] == first and i < len(_UpperCAmelCase) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
__A : Dict = tuple(_UpperCAmelCase)
__A : Optional[Any] = new_word
if len(_UpperCAmelCase) == 1:
break
else:
__A : Optional[Any] = get_pairs(_UpperCAmelCase)
__A : Any = ' '.join(_UpperCAmelCase)
__A : Any = word
return word
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A : Tuple = []
for token in re.findall(self.pat , _UpperCAmelCase):
__A : Tuple = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8')) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_UpperCAmelCase).split(' '))
return bpe_tokens
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
return self.encoder.get(_UpperCAmelCase , self.encoder.get(self.unk_token))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
return self.decoder.get(_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A : Optional[int] = ''.join(_UpperCAmelCase)
__A : List[Any] = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8' , errors=self.errors)
return text
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None):
'''simple docstring'''
if not os.path.isdir(_UpperCAmelCase):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
__A : int = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
__A : Union[str, Any] = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
with open(_UpperCAmelCase , 'w' , encoding='utf-8') as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_UpperCAmelCase , ensure_ascii=_UpperCAmelCase) + '\n')
__A : Optional[Any] = 0
with open(_UpperCAmelCase , 'w' , encoding='utf-8') as writer:
writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _UpperCAmelCase: _UpperCAmelCase[1]):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
' Please check that the tokenizer is not corrupted!')
__A : str = token_index
writer.write(' '.join(_UpperCAmelCase) + '\n')
index += 1
return vocab_file, merge_file
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__A : List[Any] = [self.cls_token_id]
__A : Tuple = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = False):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase)
if token_ids_a is None:
return [1] + ([0] * len(_UpperCAmelCase)) + [1]
return [1] + ([0] * len(_UpperCAmelCase)) + [1, 1] + ([0] * len(_UpperCAmelCase)) + [1]
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None):
'''simple docstring'''
__A : Optional[int] = [self.sep_token_id]
__A : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase=False , **_UpperCAmelCase):
'''simple docstring'''
__A : List[str] = kwargs.pop('add_prefix_space' , self.add_prefix_space)
if (is_split_into_words or add_prefix_space) and (len(_UpperCAmelCase) > 0 and not text[0].isspace()):
__A : List[Any] = ' ' + text
return (text, kwargs) | 8 |
'''simple docstring'''
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def _lowerCAmelCase ( __snake_case : str , __snake_case : str , **__snake_case : List[Any] ) -> Any:
__A : Optional[Any] = AutoConfig.from_pretrained(__snake_case , **__snake_case )
__A : int = AutoModelForSeqaSeqLM.from_config(__snake_case )
model.save_pretrained(__snake_case )
AutoTokenizer.from_pretrained(__snake_case ).save_pretrained(__snake_case )
return model
if __name__ == "__main__":
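    # fire exposes the function as a CLI; example invocation (hypothetical model
    # name and output path):
    #   python <this script> t5-small ./t5-small-random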
fire.Fire(save_randomly_initialized_version) | 8 | 1 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = ['''image_processor''', '''tokenizer''']
lowerCAmelCase = '''LayoutLMv2ImageProcessor'''
lowerCAmelCase = ('''LayoutXLMTokenizer''', '''LayoutXLMTokenizerFast''')
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase):
'''simple docstring'''
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , _UpperCAmelCase , )
__A : str = kwargs.pop('feature_extractor')
__A : Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.')
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.')
super().__init__(_UpperCAmelCase , _UpperCAmelCase)
def __call__( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = True , _UpperCAmelCase = False , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = 0 , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = True , _UpperCAmelCase = None , **_UpperCAmelCase , ):
'''simple docstring'''
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'You cannot provide bounding boxes '
'if you initialized the image processor with apply_ocr set to True.')
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.')
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.')
# first, apply the image processor
__A : Dict = self.image_processor(images=_UpperCAmelCase , return_tensors=_UpperCAmelCase)
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(_UpperCAmelCase , _UpperCAmelCase):
__A : Optional[int] = [text] # add batch dimension (as the image processor always adds a batch dimension)
__A : Dict = features['words']
__A : Any = self.tokenizer(
text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , stride=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_overflowing_tokens=_UpperCAmelCase , return_special_tokens_mask=_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , return_length=_UpperCAmelCase , verbose=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase , )
# add pixel values
__A : Dict = features.pop('pixel_values')
if return_overflowing_tokens is True:
__A : str = self.get_overflowing_images(_UpperCAmelCase , encoded_inputs['overflow_to_sample_mapping'])
__A : str = images
return encoded_inputs
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Tuple = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx])
if len(_UpperCAmelCase) != len(_UpperCAmelCase):
raise ValueError(
'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
F' {len(_UpperCAmelCase)} and {len(_UpperCAmelCase)}')
return images_with_overflow
def SCREAMING_SNAKE_CASE ( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase)
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _UpperCAmelCase , )
return self.image_processor_class
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _UpperCAmelCase , )
return self.image_processor | 8 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
lowercase__ : Any = {
'''google/tapas-base-finetuned-sqa''': (
'''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wtq''': (
'''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wikisql-supervised''': (
'''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-tabfact''': (
'''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = '''tapas'''
def __init__( self , _UpperCAmelCase=3_0522 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=1024 , _UpperCAmelCase=[3, 256, 256, 2, 256, 256, 10] , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=0 , _UpperCAmelCase=10.0 , _UpperCAmelCase=0 , _UpperCAmelCase=1.0 , _UpperCAmelCase=None , _UpperCAmelCase=1.0 , _UpperCAmelCase=False , _UpperCAmelCase=None , _UpperCAmelCase=1.0 , _UpperCAmelCase=1.0 , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase="ratio" , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=64 , _UpperCAmelCase=32 , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase , ):
'''simple docstring'''
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase)
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
__A : Dict = vocab_size
__A : Tuple = hidden_size
__A : Any = num_hidden_layers
__A : int = num_attention_heads
__A : Tuple = hidden_act
__A : Tuple = intermediate_size
__A : List[Any] = hidden_dropout_prob
__A : int = attention_probs_dropout_prob
__A : List[str] = max_position_embeddings
__A : Optional[int] = type_vocab_sizes
__A : str = initializer_range
__A : List[str] = layer_norm_eps
# Fine-tuning task hyperparameters
__A : List[str] = positive_label_weight
__A : List[Any] = num_aggregation_labels
__A : Optional[Any] = aggregation_loss_weight
__A : Tuple = use_answer_as_supervision
__A : List[str] = answer_loss_importance
__A : Any = use_normalized_answer_loss
__A : Any = huber_loss_delta
__A : Union[str, Any] = temperature
__A : Tuple = aggregation_temperature
__A : Optional[Any] = use_gumbel_for_cells
__A : List[str] = use_gumbel_for_aggregation
__A : Tuple = average_approximation_function
__A : List[str] = cell_selection_preference
__A : Dict = answer_loss_cutoff
__A : Union[str, Any] = max_num_rows
__A : Optional[Any] = max_num_columns
__A : int = average_logits_per_cell
__A : Optional[Any] = select_one_column
__A : int = allow_empty_column_selection
__A : List[Any] = init_cell_selection_weights_to_zero
__A : int = reset_position_index_per_cell
__A : Union[str, Any] = disable_per_token_loss
# Aggregation hyperparameters
__A : Optional[Any] = aggregation_labels
__A : List[str] = no_aggregation_label_index
if isinstance(self.aggregation_labels , _UpperCAmelCase):
__A : Optional[Any] = {int(_UpperCAmelCase): v for k, v in aggregation_labels.items()} | 8 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase):
'''simple docstring'''
__A : int = size
# approximate the overall size of segment tree with given value
__A : Optional[Any] = [0 for i in range(0 , 4 * size)]
# create array to store lazy update
__A : Optional[Any] = [0 for i in range(0 , 4 * size)]
__A : str = [0 for i in range(0 , 4 * size)] # flag for lazy update
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
return idx * 2
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
return idx * 2 + 1
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
if left_element == right_element:
__A : List[Any] = a[left_element - 1]
else:
__A : List[str] = (left_element + right_element) // 2
self.build(self.left(_UpperCAmelCase) , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
self.build(self.right(_UpperCAmelCase) , mid + 1 , _UpperCAmelCase , _UpperCAmelCase)
__A : Any = max(
self.segment_tree[self.left(_UpperCAmelCase)] , self.segment_tree[self.right(_UpperCAmelCase)])
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
if self.flag[idx] is True:
__A : Optional[Any] = self.lazy[idx]
__A : Optional[Any] = False
if left_element != right_element:
__A : List[Any] = self.lazy[idx]
__A : Dict = self.lazy[idx]
__A : Tuple = True
__A : Union[str, Any] = True
if right_element < a or left_element > b:
return True
if left_element >= a and right_element <= b:
__A : Optional[int] = val
if left_element != right_element:
__A : Tuple = val
__A : Any = val
__A : Tuple = True
__A : Union[str, Any] = True
return True
__A : str = (left_element + right_element) // 2
self.update(self.left(_UpperCAmelCase) , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
self.update(self.right(_UpperCAmelCase) , mid + 1 , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
__A : int = max(
self.segment_tree[self.left(_UpperCAmelCase)] , self.segment_tree[self.right(_UpperCAmelCase)])
return True
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
if self.flag[idx] is True:
__A : Union[str, Any] = self.lazy[idx]
__A : List[str] = False
if left_element != right_element:
__A : Union[str, Any] = self.lazy[idx]
__A : Optional[int] = self.lazy[idx]
__A : str = True
__A : Union[str, Any] = True
if right_element < a or left_element > b:
return -math.inf
if left_element >= a and right_element <= b:
return self.segment_tree[idx]
__A : Any = (left_element + right_element) // 2
__A : int = self.query(self.left(_UpperCAmelCase) , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
__A : Union[str, Any] = self.query(self.right(_UpperCAmelCase) , mid + 1 , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
return max(_UpperCAmelCase , _UpperCAmelCase)
def __str__( self):
'''simple docstring'''
return str([self.query(1 , 1 , self.size , _UpperCAmelCase , _UpperCAmelCase) for i in range(1 , self.size + 1)])
if __name__ == "__main__":
lowercase__ : Union[str, Any] = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
lowercase__ : str = 15
lowercase__ : List[Any] = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
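# The three queries above should print 7, 14 and 15: the maxima of A[4..6],
# A[7..11] and A[7..12] (1-indexed, inclusive). After the range assignment below,
# the full-range maximum becomes 111.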
segt.update(1, 1, size, 1, 3, 1_11)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 2_35)
print(segt) | 8 |
'''simple docstring'''
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=sys.maxsize):
'''simple docstring'''
__A : Union[str, Any] = 'bilinear'
__A : int = max_size
__A : Optional[Any] = short_edge_length
def __call__( self , _UpperCAmelCase):
'''simple docstring'''
__A : int = []
for img in imgs:
__A ,__A : Dict = img.shape[:2]
# later: provide list and randomly choose index for resize
__A : List[Any] = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1)
if size == 0:
return img
__A : Tuple = size * 1.0 / min(_UpperCAmelCase , _UpperCAmelCase)
if h < w:
__A ,__A : Optional[Any] = size, scale * w
else:
__A ,__A : Optional[Any] = scale * h, size
if max(_UpperCAmelCase , _UpperCAmelCase) > self.max_size:
__A : Tuple = self.max_size * 1.0 / max(_UpperCAmelCase , _UpperCAmelCase)
__A : Tuple = newh * scale
__A : Dict = neww * scale
__A : Dict = int(neww + 0.5)
__A : Optional[int] = int(newh + 0.5)
if img.dtype == np.uinta:
__A : int = Image.fromarray(_UpperCAmelCase)
__A : Optional[int] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR)
__A : Dict = np.asarray(_UpperCAmelCase)
else:
__A : Optional[Any] = img.permute(2 , 0 , 1).unsqueeze(0) # 3, 0, 1) # hw(c) -> nchw
__A : Dict = nn.functional.interpolate(
_UpperCAmelCase , (newh, neww) , mode=self.interp_method , align_corners=_UpperCAmelCase).squeeze(0)
img_augs.append(_UpperCAmelCase)
return img_augs
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase):
'''simple docstring'''
__A : List[Any] = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST)
__A : List[Any] = cfg.INPUT.FORMAT
__A : Dict = cfg.SIZE_DIVISIBILITY
__A : str = cfg.PAD_VALUE
__A : Union[str, Any] = cfg.INPUT.MAX_SIZE_TEST
__A : int = cfg.MODEL.DEVICE
__A : Tuple = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1)
__A : Union[str, Any] = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1)
        __A : int = lambda _UpperCAmelCase: (_UpperCAmelCase - self.pixel_mean) / self.pixel_std
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A : List[Any] = tuple(max(_UpperCAmelCase) for s in zip(*[img.shape for img in images]))
__A : Dict = [im.shape[-2:] for im in images]
__A : Optional[int] = [
nn.functional.pad(
_UpperCAmelCase , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(_UpperCAmelCase , _UpperCAmelCase)
]
return torch.stack(_UpperCAmelCase), torch.tensor(_UpperCAmelCase)
def __call__( self , _UpperCAmelCase , _UpperCAmelCase=False):
'''simple docstring'''
with torch.no_grad():
if not isinstance(_UpperCAmelCase , _UpperCAmelCase):
__A : int = [images]
if single_image:
assert len(_UpperCAmelCase) == 1
for i in range(len(_UpperCAmelCase)):
if isinstance(images[i] , torch.Tensor):
images.insert(_UpperCAmelCase , images.pop(_UpperCAmelCase).to(self.device).float())
elif not isinstance(images[i] , torch.Tensor):
images.insert(
_UpperCAmelCase , torch.as_tensor(img_tensorize(images.pop(_UpperCAmelCase) , input_format=self.input_format))
.to(self.device)
.float() , )
# resize smallest edge
__A : str = torch.tensor([im.shape[:2] for im in images])
__A : List[str] = self.aug(_UpperCAmelCase)
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
__A : Any = [self.normalizer(_UpperCAmelCase) for x in images]
# now pad them to do the following operations
__A ,__A : Any = self.pad(_UpperCAmelCase)
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
__A : str = torch.true_divide(_UpperCAmelCase , _UpperCAmelCase)
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
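# The two helpers below undo the resizing on predicted boxes: the first rescales
# the (x, y) coordinates by the per-image scale factors returned above, and the
# second clamps (x0, y0, x1, y1) to the image bounds.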
def _lowerCAmelCase ( __snake_case : Dict , __snake_case : str ) -> Dict:
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def _lowerCAmelCase ( __snake_case : List[str] , __snake_case : Tuple[int, int] ) -> int:
assert torch.isfinite(__snake_case ).all(), "Box tensor contains infinite or NaN!"
__A ,__A : int = box_size
tensor[:, 0].clamp_(min=0 , max=__snake_case )
tensor[:, 1].clamp_(min=0 , max=__snake_case )
tensor[:, 2].clamp_(min=0 , max=__snake_case )
tensor[:, 3].clamp_(min=0 , max=__snake_case ) | 8 | 1 |
'''simple docstring'''
def _lowerCAmelCase ( __snake_case : int = 2_00 ) -> int:
__A : Optional[Any] = [1, 2, 5, 10, 20, 50, 1_00, 2_00]
__A : List[str] = [0] * (pence + 1)
__A : Optional[Any] = 1 # base case: 1 way to make 0 pence
for coin in coins:
for i in range(__snake_case , pence + 1 , 1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
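# As a smaller worked example, solution(5) == 4: 5 pence can be formed as
# 1+1+1+1+1, 1+1+1+2, 1+2+2 or 5.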
if __name__ == "__main__":
assert solution(2_00) == 7_36_82 | 8 |
'''simple docstring'''
def _lowerCAmelCase ( __snake_case : Optional[Any] ) -> Optional[Any]: # noqa: E741
__A : Tuple = len(__snake_case )
__A : Optional[int] = 0
__A : str = [0] * n
__A : int = [False] * n
__A : Tuple = [False] * n
def dfs(__snake_case : List[str] , __snake_case : List[Any] , __snake_case : Optional[Any] , __snake_case : int ):
if parent == root:
out_edge_count += 1
__A : str = True
__A : Tuple = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
__A : Optional[int] = dfs(__snake_case , __snake_case , __snake_case , __snake_case )
__A : int = min(low[at] , low[to] )
# AP found via bridge
if at < low[to]:
__A : Tuple = True
# AP found via cycle
if at == low[to]:
__A : Optional[Any] = True
else:
__A : Any = min(low[at] , __snake_case )
return out_edge_count
for i in range(__snake_case ):
if not visited[i]:
__A : Tuple = 0
__A : List[Any] = dfs(__snake_case , __snake_case , -1 , __snake_case )
__A : Union[str, Any] = out_edge_count > 1
for x in range(len(__snake_case ) ):
if is_art[x] is True:
print(__snake_case )
# Adjacency list of graph
lowercase__ : Tuple = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
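# For this graph, the search marks vertices 2, 3 and 5 as articulation points:
# removing any one of them disconnects the graph (2 separates the triangle from
# the rest, 3 cuts off vertex 4, and 5 cuts off the 6-7-8 cycle).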
compute_ap(data) | 8 | 1 |
'''simple docstring'''
def _lowerCAmelCase ( __snake_case : int = 10 ) -> str:
if not isinstance(__snake_case , __snake_case ) or n < 0:
raise ValueError('Invalid input' )
__A : Optional[Any] = 10**n
__A : List[str] = 2_84_33 * (pow(2 , 7_83_04_57 , __snake_case )) + 1
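    # pow(2, 7_83_04_57, modulus) is modular exponentiation, so only the last
    # n digits of 28433 * 2**7830457 + 1 (Project Euler 97) are ever computed.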
return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"""{solution(10) = }""") | 8 |
'''simple docstring'''
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
lowercase__ : Union[str, Any] = logging.get_logger(__name__)
lowercase__ : int = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn.grep_linear''': '''encoder.layers.*.attention.gru_rel_pos_linear''',
'''self_attn.relative_attention_bias''': '''encoder.layers.*.attention.rel_attn_embed''',
'''self_attn.grep_a''': '''encoder.layers.*.attention.gru_rel_pos_const''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
lowercase__ : Dict = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def _lowerCAmelCase ( __snake_case : int , __snake_case : Union[str, Any] , __snake_case : Tuple , __snake_case : Any , __snake_case : List[str] ) -> Union[str, Any]:
for attribute in key.split('.' ):
__A : int = getattr(__snake_case , __snake_case )
if weight_type is not None:
__A : Optional[int] = getattr(__snake_case , __snake_case ).shape
else:
__A : List[str] = hf_pointer.shape
assert hf_shape == value.shape, (
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}'
)
if weight_type == "weight":
__A : Tuple = value
elif weight_type == "weight_g":
__A : Union[str, Any] = value
elif weight_type == "weight_v":
__A : Optional[Any] = value
elif weight_type == "bias":
__A : Optional[int] = value
else:
__A : Optional[int] = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def _lowerCAmelCase ( __snake_case : Any , __snake_case : List[str] ) -> List[Any]:
__A : Optional[Any] = []
__A : Any = fairseq_model.state_dict()
__A : Union[str, Any] = hf_model.feature_extractor
for name, value in fairseq_dict.items():
__A : Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
__snake_case , __snake_case , __snake_case , __snake_case , hf_model.config.feat_extract_norm == 'group' , )
__A : Optional[Any] = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
__A : int = True
if "*" in mapped_key:
__A : Any = name.split(__snake_case )[0].split('.' )[-2]
__A : List[Any] = mapped_key.replace('*' , __snake_case )
if "weight_g" in name:
__A : Optional[Any] = 'weight_g'
elif "weight_v" in name:
__A : Union[str, Any] = 'weight_v'
elif "bias" in name and "relative_attention_bias" not in name:
__A : Optional[Any] = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__A : Tuple = 'weight'
else:
__A : Dict = None
set_recursively(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
continue
if not is_used:
unused_weights.append(__snake_case )
logger.warning(f'Unused weights: {unused_weights}' )
def _lowerCAmelCase ( __snake_case : List[str] , __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : Optional[int] ) -> int:
__A : int = full_name.split('conv_layers.' )[-1]
__A : List[str] = name.split('.' )
__A : Optional[int] = int(items[0] )
__A : str = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
__A : Optional[int] = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
__A : Union[str, Any] = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
__A : Dict = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
__A : Any = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(__snake_case )
@torch.no_grad()
def _lowerCAmelCase ( __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : Tuple=None ) -> Any:
# load the pre-trained checkpoints
__A : List[str] = torch.load(__snake_case )
__A : Dict = WavLMConfigOrig(checkpoint['cfg'] )
__A : Optional[int] = WavLMOrig(__snake_case )
model.load_state_dict(checkpoint['model'] )
model.eval()
if config_path is not None:
__A : List[Any] = WavLMConfig.from_pretrained(__snake_case )
else:
__A : Dict = WavLMConfig()
__A : Optional[Any] = WavLMModel(__snake_case )
recursively_load_weights(__snake_case , __snake_case )
hf_wavlm.save_pretrained(__snake_case )
if __name__ == "__main__":
lowercase__ : Tuple = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
lowercase__ : Any = parser.parse_args()
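    # Example invocation (hypothetical paths):
    #   python <this script> --checkpoint_path ./WavLM-Base.pt --pytorch_dump_folder_path ./wavlm-base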
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path) | 8 | 1 |
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase__ : Optional[int] = logging.get_logger(__name__)
lowercase__ : int = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
lowercase__ : Tuple = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
lowercase__ : str = {
'''abeja/gpt-neox-japanese-2.7b''': 20_48,
}
def _lowerCAmelCase ( __snake_case : Dict , __snake_case : Dict ) -> Any:
with open(__snake_case , 'r' , encoding='utf-8' ) as f:
__A : Optional[int] = json.loads(f.read() )
__A : Any = collections.OrderedDict()
__A : Optional[Any] = collections.OrderedDict()
__A : Union[str, Any] = collections.OrderedDict()
with open(__snake_case , 'r' , encoding='utf-8' ) as f:
__A : Dict = f.readlines()
__A : Tuple = [[t.rstrip('\n' )] if (t == ',' or ',' not in t) else t.rstrip('\n' ).split(',' ) for t in token]
for idx, b in enumerate(__snake_case ):
__A : int = b
__A : int = idx
for wd in b:
__A : Any = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = ['''input_ids''', '''attention_mask''']
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase="<|endoftext|>" , _UpperCAmelCase="<|endoftext|>" , _UpperCAmelCase="<|startoftext|>" , _UpperCAmelCase="<|endoftext|>" , _UpperCAmelCase=False , **_UpperCAmelCase , ):
'''simple docstring'''
super().__init__(
unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , do_clean_text=_UpperCAmelCase , **_UpperCAmelCase , )
if not os.path.isfile(_UpperCAmelCase):
raise ValueError(
                F'Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained'
                ' model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`')
if not os.path.isfile(_UpperCAmelCase):
raise ValueError(
                F'Can\'t find an emoji file at path \'{emoji_file}\'. To load the emoji information from a Google'
                ' pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`')
__A : Tuple = do_clean_text
__A ,__A ,__A ,__A : Dict = load_vocab_and_emoji(_UpperCAmelCase , _UpperCAmelCase)
__A : int = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji)
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return len(self.raw_vocab)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return dict(self.raw_vocab , **self.added_tokens_encoder)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
return self.subword_tokenizer.tokenize(_UpperCAmelCase , clean=self.do_clean_text)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
return self.vocab.get(_UpperCAmelCase , self.vocab.get(self.unk_token))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
return self.subword_tokenizer.convert_id_to_token(_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A : int = ''.join(_UpperCAmelCase).strip()
return out_string
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A : int = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase) + [self.eos_token_id])
if len(_UpperCAmelCase) > self.model_max_length:
__A : Optional[int] = input_ids[-self.model_max_length :]
return input_ids
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None):
'''simple docstring'''
__A : List[str] = 0
if os.path.isdir(_UpperCAmelCase):
__A : Dict = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
__A : int = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'])
else:
__A : Tuple = (
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file']
)
__A : int = (
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file']
)
with open(_UpperCAmelCase , 'w' , encoding='utf-8') as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
' Please check that the vocabulary is not corrupted!')
__A : Dict = token_index
writer.write(','.join(_UpperCAmelCase) + '\n')
index += 1
with open(_UpperCAmelCase , 'w' , encoding='utf-8') as writer:
json.dump(self.emoji , _UpperCAmelCase)
return vocab_file, emoji_file
class SCREAMING_SNAKE_CASE (a__ ):
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : int = vocab # same as swe
__A : Union[str, Any] = ids_to_tokens # same as bpe
__A : Optional[int] = emoji
__A : List[str] = np.max([len(_UpperCAmelCase) for w in self.vocab.keys()])
__A : Optional[int] = re.compile(R'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)')
__A : Optional[Any] = re.compile(R'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*')
__A : str = re.compile(R'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}')
__A : Any = re.compile(
R'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*')
__A : List[str] = re.compile(
R'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*')
__A : Any = re.compile(
R'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*')
__A : str = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
__A : Dict = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
__A : int = str.maketrans({k: '<BLOCK>' for k in keisen + blocks})
def __len__( self):
'''simple docstring'''
return len(self.ids_to_tokens)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A : Tuple = self.content_repattera.sub('<URL>' , _UpperCAmelCase)
__A : Any = self.content_repattera.sub('<EMAIL>' , _UpperCAmelCase)
__A : Union[str, Any] = self.content_repattera.sub('<TEL>' , _UpperCAmelCase)
__A : List[str] = self.content_repattera.sub('<DATE>' , _UpperCAmelCase)
__A : Optional[Any] = self.content_repattera.sub('<DATE>' , _UpperCAmelCase)
__A : Optional[int] = self.content_repattera.sub('<PRICE>' , _UpperCAmelCase)
__A : List[str] = content.translate(self.content_transa)
while "<BLOCK><BLOCK>" in content:
__A : str = content.replace('<BLOCK><BLOCK>' , '<BLOCK>')
return content
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase=False):
'''simple docstring'''
__A : Union[str, Any] = text.replace(' ' , '<SP>')
__A : Tuple = text.replace(' ' , '<SP>')
__A : Optional[Any] = text.replace('\r\n' , '<BR>')
__A : Tuple = text.replace('\n' , '<BR>')
__A : List[Any] = text.replace('\r' , '<BR>')
__A : Optional[Any] = text.replace('\t' , '<TAB>')
__A : Union[str, Any] = text.replace('—' , 'ー')
__A : int = text.replace('−' , 'ー')
for k, v in self.emoji["emoji"].items():
if k in text:
__A : Union[str, Any] = text.replace(_UpperCAmelCase , _UpperCAmelCase)
if clean:
__A : int = self.clean_text(_UpperCAmelCase)
def check_simbol(_UpperCAmelCase):
__A : str = x.encode()
if len(_UpperCAmelCase) == 1 and len(_UpperCAmelCase) == 2:
__A : Dict = (int(e[0]) << 8) + int(e[1])
if (
(c >= 0Xc_2_a_1 and c <= 0Xc_2_b_f)
or (c >= 0Xc_7_8_0 and c <= 0Xc_7_8_3)
or (c >= 0Xc_a_b_9 and c <= 0Xc_b_b_f)
or (c >= 0Xc_c_8_0 and c <= 0Xc_d_a_2)
):
return True
return False
def checkuae(_UpperCAmelCase):
__A : Optional[int] = x.encode()
if len(_UpperCAmelCase) == 1 and len(_UpperCAmelCase) == 3:
__A : Dict = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
if c >= 0Xe_2_8_0_8_0 and c <= 0Xe_2_b_0_7_f:
return True
return False
__A : Union[str, Any] = 0
__A : int = []
while pos < len(_UpperCAmelCase):
__A : Optional[int] = min(len(_UpperCAmelCase) , pos + self.maxlen + 1) if text[pos] == '<' else pos + 3
__A : Dict = [] # (token_id, token, pos)
for e in range(_UpperCAmelCase , _UpperCAmelCase , -1):
__A : List[str] = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(_UpperCAmelCase) > 2:
__A : Union[str, Any] = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e))
if len(_UpperCAmelCase) > 0:
# the smallest token_id is adopted
                __A ,__A ,__A : List[Any] = sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase: _UpperCAmelCase[0])[0]
result.append(_UpperCAmelCase)
__A : Optional[int] = e
else:
__A : int = pos + 1
__A : List[Any] = text[pos:end]
if check_simbol(_UpperCAmelCase):
result.append('<KIGOU>')
elif checkuae(_UpperCAmelCase):
result.append('<U2000U2BFF>')
else:
for i in wd.encode('utf-8'):
result.append('<|byte%d|>' % i)
__A : Optional[Any] = end
return result
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase="\n"):
'''simple docstring'''
__A : Optional[int] = []
__A : Tuple = []
__A : Any = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2]))
else:
if len(_UpperCAmelCase) > 0:
words.append(bytearray(_UpperCAmelCase).decode('utf-8' , errors='replace'))
__A : Tuple = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji['emoji_inv'][word])
elif word == "<SP>":
words.append(' ')
elif word == "<BR>":
words.append(_UpperCAmelCase)
elif word == "<TAB>":
words.append('\t')
elif word == "<BLOCK>":
words.append('▀')
elif word == "<KIGOU>":
words.append('ǀ')
elif word == "<U2000U2BFF>":
words.append('‖')
else:
words.append(_UpperCAmelCase)
if len(_UpperCAmelCase) > 0:
words.append(bytearray(_UpperCAmelCase).decode('utf-8' , errors='replace'))
__A : Optional[int] = ''.join(_UpperCAmelCase)
return text | 8 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = 42
class SCREAMING_SNAKE_CASE (a__ , a__ ):
@register_to_config
def __init__( self , _UpperCAmelCase = 6_5536 , _UpperCAmelCase = None , _UpperCAmelCase = 2 , _UpperCAmelCase = 2 , _UpperCAmelCase = 0 , _UpperCAmelCase = "fourier" , _UpperCAmelCase = True , _UpperCAmelCase = False , _UpperCAmelCase = 0.0 , _UpperCAmelCase = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , _UpperCAmelCase = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , _UpperCAmelCase = "UNetMidBlock1D" , _UpperCAmelCase = None , _UpperCAmelCase = (32, 32, 64) , _UpperCAmelCase = None , _UpperCAmelCase = 8 , _UpperCAmelCase = 1 , _UpperCAmelCase = False , ):
'''simple docstring'''
super().__init__()
__A : Dict = sample_size
# time
if time_embedding_type == "fourier":
__A : int = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=_UpperCAmelCase , log=_UpperCAmelCase , flip_sin_to_cos=_UpperCAmelCase)
__A : Any = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
__A : List[str] = Timesteps(
block_out_channels[0] , flip_sin_to_cos=_UpperCAmelCase , downscale_freq_shift=_UpperCAmelCase)
__A : List[str] = block_out_channels[0]
if use_timestep_embedding:
__A : Optional[Any] = block_out_channels[0] * 4
__A : Optional[int] = TimestepEmbedding(
in_channels=_UpperCAmelCase , time_embed_dim=_UpperCAmelCase , act_fn=_UpperCAmelCase , out_dim=block_out_channels[0] , )
__A : Dict = nn.ModuleList([])
__A : Dict = None
__A : Tuple = nn.ModuleList([])
__A : Tuple = None
# down
__A : Any = in_channels
for i, down_block_type in enumerate(_UpperCAmelCase):
__A : Tuple = output_channel
__A : Optional[Any] = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
__A : List[str] = i == len(_UpperCAmelCase) - 1
__A : int = get_down_block(
_UpperCAmelCase , num_layers=_UpperCAmelCase , in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(_UpperCAmelCase)
# mid
__A : str = get_mid_block(
_UpperCAmelCase , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=_UpperCAmelCase , add_downsample=_UpperCAmelCase , )
# up
__A : Optional[int] = list(reversed(_UpperCAmelCase))
__A : Optional[int] = reversed_block_out_channels[0]
if out_block_type is None:
__A : str = out_channels
else:
__A : List[Any] = block_out_channels[0]
for i, up_block_type in enumerate(_UpperCAmelCase):
__A : Optional[Any] = output_channel
__A : Optional[Any] = (
reversed_block_out_channels[i + 1] if i < len(_UpperCAmelCase) - 1 else final_upsample_channels
)
__A : Dict = i == len(_UpperCAmelCase) - 1
__A : str = get_up_block(
_UpperCAmelCase , num_layers=_UpperCAmelCase , in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(_UpperCAmelCase)
__A : Optional[int] = output_channel
# out
__A : str = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32)
__A : Optional[Any] = get_out_block(
out_block_type=_UpperCAmelCase , num_groups_out=_UpperCAmelCase , embed_dim=block_out_channels[0] , out_channels=_UpperCAmelCase , act_fn=_UpperCAmelCase , fc_dim=block_out_channels[-1] // 4 , )
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = True , ):
'''simple docstring'''
__A : Any = timestep
if not torch.is_tensor(_UpperCAmelCase):
__A : Any = torch.tensor([timesteps] , dtype=torch.long , device=sample.device)
elif torch.is_tensor(_UpperCAmelCase) and len(timesteps.shape) == 0:
__A : Any = timesteps[None].to(sample.device)
__A : List[Any] = self.time_proj(_UpperCAmelCase)
if self.config.use_timestep_embedding:
__A : Dict = self.time_mlp(_UpperCAmelCase)
else:
__A : Dict = timestep_embed[..., None]
__A : Tuple = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
__A : List[Any] = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))
# 2. down
__A : int = ()
for downsample_block in self.down_blocks:
__A ,__A : int = downsample_block(hidden_states=_UpperCAmelCase , temb=_UpperCAmelCase)
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
__A : Optional[int] = self.mid_block(_UpperCAmelCase , _UpperCAmelCase)
# 4. up
for i, upsample_block in enumerate(self.up_blocks):
__A : Any = down_block_res_samples[-1:]
__A : Optional[int] = down_block_res_samples[:-1]
__A : Any = upsample_block(_UpperCAmelCase , res_hidden_states_tuple=_UpperCAmelCase , temb=_UpperCAmelCase)
# 5. post-process
if self.out_block:
__A : Dict = self.out_block(_UpperCAmelCase , _UpperCAmelCase)
if not return_dict:
return (sample,)
return UNetaDOutput(sample=_UpperCAmelCase) | 8 | 1 |
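# Hedged usage sketch, assuming the class above mirrors diffusers' UNet1DModel
# (the mangled names obscure it, but the block layout and forward pass match):
import torch
from diffusers import UNet1DModel

model = UNet1DModel(sample_size=65536, in_channels=2, out_channels=2)
sample = torch.randn(1, 2, 65536)   # (batch, channels, sequence length)
timestep = torch.tensor([10])
with torch.no_grad():
    out = model(sample, timestep).sample
print(out.shape)                     # torch.Size([1, 2, 65536])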
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse the launcher's own arguments, leaving the rest for the training script."""
    parser = ArgumentParser(
        description=(
            'PyTorch TPU distributed training launch '
            'helper utility that will spawn up '
            'multiple distributed processes'
        ) )
    # Optional arguments for the launch helper
    parser.add_argument('--num_cores', type=int, default=1, help='Number of TPU cores to use (1 or 8).' )
    # positional
    parser.add_argument(
        'training_script', type=str, help=(
            'The full path to the single TPU training '
            'program/script to be launched in parallel, '
            'followed by all the arguments for the '
            'training script'
        ) , )
    # rest from the training program
    parser.add_argument('training_script_args', nargs=REMAINDER )
    return parser.parse_args()
def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv so the training script sees the arguments meant for it
    sys.argv = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores )]
    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main() | 8 |
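# Example invocation (hypothetical paths), assuming a TPU host with torch_xla
# installed and a training script exposing the `_mp_fn(index)` entry point that
# xmp.spawn expects:
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased --do_train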
'''simple docstring'''
def hamming_distance(string1: str, string2: str) -> int:
    """Return the number of positions at which two equal-length strings differ.
    >>> hamming_distance("python", "pythin")
    1
    >>> hamming_distance("karolin", "kathrin")
    3
    """
    if len(string1) != len(string2):
        raise ValueError('String lengths must match!' )
    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 | 1 |
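# Equivalent one-liner, for reference: sum(c1 != c2 for c1, c2 in zip(string1, string2))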
'''simple docstring'''
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
lowercase__ : List[str] = Mapping[str, np.ndarray]
lowercase__ : Dict = Mapping[str, Any] # Is a nested dict.
lowercase__ : Dict = 0.01
@dataclasses.dataclass(frozen=a__ )
class SCREAMING_SNAKE_CASE :
lowerCAmelCase = 42 # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
lowerCAmelCase = 42 # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
lowerCAmelCase = 42 # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
lowerCAmelCase = 42 # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
lowerCAmelCase = 42 # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
lowerCAmelCase = None
# Optional remark about the protein. Included as a comment in output PDB
# files
lowerCAmelCase = None
# Templates used to generate this protein (prediction-only)
lowerCAmelCase = None
# Chain corresponding to each parent
lowerCAmelCase = None
def _lowerCAmelCase ( __snake_case : str ) -> Protein:
__A : Optional[Any] = r'(\[[A-Z]+\]\n)'
__A : List[str] = [tag.strip() for tag in re.split(__snake_case , __snake_case ) if len(__snake_case ) > 0]
__A : Iterator[Tuple[str, List[str]]] = zip(tags[0::2] , [l.split('\n' ) for l in tags[1::2]] )
__A : List[str] = ["N", "CA", "C"]
__A : int = None
__A : Dict = None
__A : Tuple = None
for g in groups:
if "[PRIMARY]" == g[0]:
__A : Optional[int] = g[1][0].strip()
for i in range(len(__snake_case ) ):
if seq[i] not in residue_constants.restypes:
__A : Optional[int] = 'X' # FIXME: strings are immutable
__A : int = np.array(
[residue_constants.restype_order.get(__snake_case , residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
__A : List[List[float]] = []
for axis in range(3 ):
tertiary.append(list(map(__snake_case , g[1][axis].split() ) ) )
__A : List[str] = np.array(__snake_case )
__A : Tuple = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
for i, atom in enumerate(__snake_case ):
__A : Dict = np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
__A : Dict = np.array(list(map({'-': 0, '+': 1}.get , g[1][0].strip() ) ) )
__A : Any = np.zeros(
(
len(__snake_case ),
residue_constants.atom_type_num,
) ).astype(np.floataa )
for i, atom in enumerate(__snake_case ):
__A : Tuple = 1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=__snake_case , atom_mask=__snake_case , aatype=__snake_case , residue_index=np.arange(len(__snake_case ) ) , b_factors=__snake_case , )
def _lowerCAmelCase ( __snake_case : Protein , __snake_case : int = 0 ) -> List[str]:
__A : List[str] = []
__A : Dict = prot.remark
if remark is not None:
pdb_headers.append(f'REMARK {remark}' )
__A : Optional[int] = prot.parents
__A : List[Any] = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
__A : Tuple = [p for i, p in zip(__snake_case , __snake_case ) if i == chain_id]
if parents is None or len(__snake_case ) == 0:
__A : List[str] = ['N/A']
pdb_headers.append(f'PARENT {" ".join(__snake_case )}' )
return pdb_headers
def _lowerCAmelCase ( __snake_case : Protein , __snake_case : str ) -> str:
__A : List[str] = []
__A : Union[str, Any] = pdb_str.split('\n' )
__A : Tuple = prot.remark
if remark is not None:
out_pdb_lines.append(f'REMARK {remark}' )
__A : List[List[str]]
if prot.parents is not None and len(prot.parents ) > 0:
__A : List[Any] = []
if prot.parents_chain_index is not None:
__A : Dict[str, List[str]] = {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(__snake_case ) , [] )
parent_dict[str(__snake_case )].append(__snake_case )
__A : Dict = max([int(__snake_case ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
__A : Any = parent_dict.get(str(__snake_case ) , ['N/A'] )
parents_per_chain.append(__snake_case )
else:
parents_per_chain.append(list(prot.parents ) )
else:
__A : Any = [['N/A']]
def make_parent_line(__snake_case : Sequence[str] ) -> str:
return f'PARENT {" ".join(__snake_case )}'
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
__A : Any = 0
for i, l in enumerate(__snake_case ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(__snake_case )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(__snake_case ):
__A : Union[str, Any] = parents_per_chain[chain_counter]
else:
__A : List[Any] = ['N/A']
out_pdb_lines.append(make_parent_line(__snake_case ) )
return "\n".join(__snake_case )
def _lowerCAmelCase ( __snake_case : Protein ) -> str:
__A : List[str] = residue_constants.restypes + ['X']
def res_atoa(__snake_case : int ) -> str:
return residue_constants.restype_atoa.get(restypes[r] , 'UNK' )
__A : List[str] = residue_constants.atom_types
__A : List[str] = []
__A : Dict = prot.atom_mask
__A : Any = prot.aatype
__A : Tuple = prot.atom_positions
__A : Any = prot.residue_index.astype(np.intaa )
__A : List[Any] = prot.b_factors
__A : Any = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError('Invalid aatypes.' )
__A : str = get_pdb_headers(__snake_case )
if len(__snake_case ) > 0:
pdb_lines.extend(__snake_case )
__A : int = aatype.shape[0]
__A : Tuple = 1
__A : int = 0
__A : Union[str, Any] = string.ascii_uppercase
__A : Optional[Any] = None
# Add all atom sites.
for i in range(__snake_case ):
__A : Union[str, Any] = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(__snake_case , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
__A : Any = 'ATOM'
__A : Tuple = atom_name if len(__snake_case ) == 4 else f' {atom_name}'
__A : Tuple = ''
__A : Tuple = ''
__A : Any = 1.00
__A : Any = atom_name[0] # Protein supports only C, N, O, S, this works.
__A : int = ''
__A : Tuple = 'A'
if chain_index is not None:
__A : Tuple = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
__A : Any = (
f'{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}'
f'{res_name_a:>3} {chain_tag:>1}'
f'{residue_index[i]:>4}{insertion_code:>1} '
f'{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}'
f'{occupancy:>6.2f}{b_factor:>6.2f} '
f'{element:>2}{charge:>2}'
)
pdb_lines.append(__snake_case )
atom_index += 1
__A : str = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
__A : str = True
__A : List[Any] = chain_index[i + 1]
if should_terminate:
# Close the chain.
__A : Any = 'TER'
__A : List[Any] = (
f'{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}'
)
pdb_lines.append(__snake_case )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(__snake_case , __snake_case ) )
pdb_lines.append('END' )
pdb_lines.append('' )
return "\n".join(__snake_case )
def _lowerCAmelCase ( __snake_case : Protein ) -> np.ndarray:
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def _lowerCAmelCase ( __snake_case : FeatureDict , __snake_case : ModelOutput , __snake_case : Optional[np.ndarray] = None , __snake_case : Optional[np.ndarray] = None , __snake_case : Optional[str] = None , __snake_case : Optional[Sequence[str]] = None , __snake_case : Optional[Sequence[int]] = None , ) -> Protein:
return Protein(
aatype=features['aatype'] , atom_positions=result['final_atom_positions'] , atom_mask=result['final_atom_mask'] , residue_index=features['residue_index'] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result['final_atom_mask'] ) , chain_index=__snake_case , remark=__snake_case , parents=__snake_case , parents_chain_index=__snake_case , ) | 8 |
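# For reference, the fixed-width f-string in the atom-record builder above emits
# standard PDB v3.3 ATOM lines; a made-up example (column positions matter, not values):
# "ATOM      1  CA  MET A   1      11.104   6.134  -6.504  1.00  0.00           C"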
'''simple docstring'''
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : Tuple = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    """Copy/convert the original checkpoint into the RobertaPreLayerNorm layout."""
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=['RobertaPreLayerNormForMaskedLM'] )
    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename='pytorch_model.bin' ) )
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith('roberta.' ):
            tensor_key = 'roberta_prelayernorm.' + tensor_key[len('roberta.' ) :]
        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith('.self.LayerNorm.weight' ) or tensor_key.endswith('.self.LayerNorm.bias' ):
            continue
        state_dict[tensor_key] = tensor_value
    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=checkpoint_repo, config=config, state_dict=state_dict )
    model.save_pretrained(pytorch_dump_folder_path )
    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint-repo''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path) | 8 | 1 |
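# Example invocation (the script filename is illustrative; the repo id is the one
# suggested in the help string above):
#   python convert_roberta_prelayernorm_checkpoint.py \
#       --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#       --pytorch_dump_folder_path ./roberta-prelayernorm-converted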
'''simple docstring'''
from __future__ import annotations
import math
__version__ = "2020.9.26"
__author__ = "xcodz-dot, cclaus, dhruvmanila"
def convert_to_2d(x: float, y: float, z: float, scale: float, distance: float) -> tuple[float, float]:
    """Project a 3D point onto 2D with a simple perspective divide."""
    if not all(isinstance(val, (float, int)) for val in locals().values() ):
        msg = f'Input values must either be float or int: {list(locals().values() )}'
        raise TypeError(msg )
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y
def rotate(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    """Rotate a 3D point about one coordinate axis."""
    if not isinstance(axis, str ):
        raise TypeError('Axis must be a str' )
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values() ):
        msg = (
            'Input values except axis must either be float or int: '
            f'{list(input_variables.values() )}'
        )
        raise TypeError(msg )
    # note: this is the module's own angle convention, not a plain degrees-to-radians conversion
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle ) - y * math.sin(angle )
        new_y = y * math.cos(angle ) + x * math.sin(angle )
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle ) - z * math.sin(angle )
        new_z = z * math.cos(angle ) + y * math.sin(angle )
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle ) - z * math.sin(angle )
        new_z = z * math.cos(angle ) + x * math.sin(angle )
        new_y = y
    else:
        raise ValueError('not a valid axis, choose one of \'x\', \'y\', \'z\'' )
    return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
    print(f"""{convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0) = }""")
print(f"""{rotate(1.0, 2.0, 3.0, "y", 90.0) = }""") | 8 |
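# Quick check of the projection above: a point farther from the camera projects
# closer to the origin, since x' = x * distance / (z + distance) * scale.
print(convert_to_2d(1.0, 1.0, 10.0, 1.0, 10.0))  # (0.5, 0.5)
print(convert_to_2d(1.0, 1.0, 30.0, 1.0, 10.0))  # (0.25, 0.25)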
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
lowercase__ : Dict = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = field(default=a__ , metadata={'''help''': '''Whether to use SortishSampler or not.'''} )
lowerCAmelCase = field(
default=a__ , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
lowerCAmelCase = field(
default=a__ , metadata={
'''help''': (
'''The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `max_length` value of the model configuration.'''
)
} , )
lowerCAmelCase = field(
default=a__ , metadata={
'''help''': (
'''The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `num_beams` value of the model configuration.'''
)
} , )
lowerCAmelCase = field(
default=a__ , metadata={
'''help''': '''Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'''
} , )
    def to_dict(self):
        """Serialize this instance, replacing any `GenerationConfig` value by its dict form."""
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
return d | 8 | 1 |
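# Hedged usage sketch, assuming this mirrors transformers' Seq2SeqTrainingArguments:
from transformers import Seq2SeqTrainingArguments

args = Seq2SeqTrainingArguments(
    output_dir='out',             # inherited required argument
    predict_with_generate=True,   # compute generative metrics via generate()
    generation_max_length=128,
    generation_num_beams=4,
)
print(args.to_dict()['generation_max_length'])  # 128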
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
@require_torch
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = pipeline(
task='zero-shot-audio-classification' , model='hf-internal-testing/tiny-clap-htsat-unfused')
__A : str = load_dataset('ashraq/esc50')
__A : str = dataset['train']['audio'][-1]['array']
__A : Union[str, Any] = audio_classifier(_UpperCAmelCase , candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'])
self.assertEqual(
nested_simplify(_UpperCAmelCase) , [{'score': 0.501, 'label': 'Sound of a dog'}, {'score': 0.499, 'label': 'Sound of vaccum cleaner'}] , )
@unittest.skip('No models are available in TF')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
@slow
@require_torch
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = pipeline(
task='zero-shot-audio-classification' , model='laion/clap-htsat-unfused' , )
# This is an audio of a dog
__A : Any = load_dataset('ashraq/esc50')
__A : Dict = dataset['train']['audio'][-1]['array']
__A : Tuple = audio_classifier(_UpperCAmelCase , candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'])
self.assertEqual(
nested_simplify(_UpperCAmelCase) , [
{'score': 0.999, 'label': 'Sound of a dog'},
{'score': 0.001, 'label': 'Sound of vaccum cleaner'},
] , )
__A : Tuple = audio_classifier([audio] * 5 , candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'])
self.assertEqual(
nested_simplify(_UpperCAmelCase) , [
[
{'score': 0.999, 'label': 'Sound of a dog'},
{'score': 0.001, 'label': 'Sound of vaccum cleaner'},
],
]
* 5 , )
__A : List[str] = audio_classifier(
[audio] * 5 , candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'] , batch_size=5)
self.assertEqual(
nested_simplify(_UpperCAmelCase) , [
[
{'score': 0.999, 'label': 'Sound of a dog'},
{'score': 0.001, 'label': 'Sound of vaccum cleaner'},
],
]
* 5 , )
@unittest.skip('No models are available in TF')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass | 8 |
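# Hedged sketch of the pipeline under test, assuming the public transformers API
# (the audio file name is illustrative; a 1-D numpy waveform also works):
from transformers import pipeline

classifier = pipeline(task='zero-shot-audio-classification', model='laion/clap-htsat-unfused')
scores = classifier('dog_bark.wav', candidate_labels=['Sound of a dog', 'Sound of rain'])
print(scores)  # e.g. [{'score': 0.99, 'label': 'Sound of a dog'}, ...]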
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : List[Any] = logging.get_logger(__name__)
lowercase__ : Optional[int] = {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = '''lxmert'''
lowerCAmelCase = {}
def __init__( self , _UpperCAmelCase=3_0522 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=9500 , _UpperCAmelCase=1600 , _UpperCAmelCase=400 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=9 , _UpperCAmelCase=5 , _UpperCAmelCase=5 , _UpperCAmelCase=2048 , _UpperCAmelCase=4 , _UpperCAmelCase=6.67 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , **_UpperCAmelCase , ):
'''simple docstring'''
__A : Tuple = vocab_size
__A : int = hidden_size
__A : str = num_attention_heads
__A : Tuple = hidden_act
__A : int = intermediate_size
__A : str = hidden_dropout_prob
__A : Optional[int] = attention_probs_dropout_prob
__A : Optional[Any] = max_position_embeddings
__A : Tuple = type_vocab_size
__A : Optional[int] = initializer_range
__A : Any = layer_norm_eps
__A : Optional[Any] = num_qa_labels
__A : Optional[int] = num_object_labels
__A : Any = num_attr_labels
__A : Union[str, Any] = l_layers
__A : Optional[int] = x_layers
__A : List[Any] = r_layers
__A : Tuple = visual_feat_dim
__A : Tuple = visual_pos_dim
__A : Optional[int] = visual_loss_normalizer
__A : int = task_matched
__A : List[Any] = task_mask_lm
__A : Optional[Any] = task_obj_predict
__A : str = task_qa
__A : List[Any] = visual_obj_loss
__A : Optional[Any] = visual_attr_loss
__A : Union[str, Any] = visual_feat_loss
__A : Union[str, Any] = {'vision': r_layers, 'cross_encoder': x_layers, 'language': l_layers}
super().__init__(**_UpperCAmelCase) | 8 | 1 |
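# Hedged usage sketch, assuming the class above mirrors transformers' LxmertConfig:
# the three layer counts feed the language, cross-modality and vision encoders.
from transformers import LxmertConfig

config = LxmertConfig(l_layers=9, x_layers=5, r_layers=5)
print(config.num_hidden_layers)  # {'vision': 5, 'cross_encoder': 5, 'language': 9}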
'''simple docstring'''
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)
def dutch_national_flag_sort(sequence: list) -> list:
    """Sort a sequence of 0/1/2 values in place with a single three-way pass.
    >>> dutch_national_flag_sort([2, 1, 0])
    [0, 1, 2]
    >>> dutch_national_flag_sort([0, 1, 1, 0, 1, 2, 1, 2, 0, 0, 0, 1])
    [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2]
    """
    if not sequence:
        return []
    if len(sequence ) == 1:
        return list(sequence )
    low = 0
    high = len(sequence ) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f'The elements inside the sequence must contain only {colors} values'
            raise ValueError(msg )
    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('Enter numbers separated by commas:\n').strip()
    unsorted = [int(item.strip()) for item in user_input.split(',')]
print(f"""{dutch_national_flag_sort(unsorted)}""") | 8 |
'''simple docstring'''
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    """Count the minimum number of perfect squares that sum to `number`.
    >>> minimum_squares_to_represent_a_number(25)
    1
    >>> minimum_squares_to_represent_a_number(21)
    3
    """
    if number != int(number ):
        raise ValueError('the value of input must be a natural number' )
    if number < 0:
        raise ValueError('the value of input must not be a negative number' )
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1 ):
        answer = sys.maxsize
        root = int(math.sqrt(i ) )
        for j in range(1, root + 1 ):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer )
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 | 1 |
'''simple docstring'''
import os
SYMBOLS = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
def parse_roman_numerals(numerals: str) -> int:
    """Convert a (possibly inefficient) Roman numeral string to an integer."""
    total_value = 0
    index = 0
    while index < len(numerals ) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals(num: int) -> str:
    """Write `num` as a minimal-length Roman numeral."""
    numerals = ''
    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000
    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100
    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10
    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals
def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Project Euler 89: total characters saved by rewriting each numeral minimally."""
    savings = 0
    with open(os.path.dirname(__file__ ) + roman_numerals_filename ) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original )
        shorter = generate_roman_numerals(num )
        savings += len(original ) - len(shorter )
    return savings
if __name__ == "__main__":
print(f"""{solution() = }""") | 8 |
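# Round-trip example for the two converters above: parse_roman_numerals('IIIIIIIII')
# returns 9 and generate_roman_numerals(9) returns 'IX'; that 7-character saving is
# exactly the per-line quantity that solution() accumulates.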
'''simple docstring'''
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    """Greedy fractional knapsack: take items by decreasing value/weight ratio.
    >>> fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
    (240.0, [1, 1, 0.6666666666666666])
    """
    index = list(range(len(value ) ) )
    ratio = [v / w for v, w in zip(value, weight )]
    index.sort(key=lambda i: ratio[i], reverse=True )
    max_value: float = 0
    fractions: list[float] = [0] * len(value )
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 | 1 |
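# Why the greedy pass is optimal here: because fractions are allowed, an exchange
# argument shows any packing can be improved by shifting weight toward the item
# with the highest remaining value/weight ratio, which is exactly the order used.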
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE (metaclass=a__ ):
lowerCAmelCase = ['''torch''']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
class SCREAMING_SNAKE_CASE (metaclass=a__ ):
lowerCAmelCase = ['''torch''']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
class SCREAMING_SNAKE_CASE (metaclass=a__ ):
lowerCAmelCase = ['''torch''']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
class SCREAMING_SNAKE_CASE (metaclass=a__ ):
lowerCAmelCase = ['''torch''']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
class SCREAMING_SNAKE_CASE (metaclass=a__ ):
lowerCAmelCase = ['''torch''']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
class SCREAMING_SNAKE_CASE (metaclass=a__ ):
lowerCAmelCase = ['''torch''']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
class SCREAMING_SNAKE_CASE (metaclass=a__ ):
lowerCAmelCase = ['''torch''']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
class SCREAMING_SNAKE_CASE (metaclass=a__ ):
lowerCAmelCase = ['''torch''']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
class SCREAMING_SNAKE_CASE (metaclass=a__ ):
lowerCAmelCase = ['''torch''']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
class SCREAMING_SNAKE_CASE (metaclass=a__ ):
lowerCAmelCase = ['''torch''']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
class SCREAMING_SNAKE_CASE (metaclass=a__ ):
lowerCAmelCase = ['''torch''']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
def _lowerCAmelCase ( *__snake_case : int , **__snake_case : List[str] ) -> Tuple:
requires_backends(__snake_case , ['torch'] )
def _lowerCAmelCase ( *__snake_case : Union[str, Any] , **__snake_case : Dict ) -> Dict:
requires_backends(__snake_case , ['torch'] )
def _lowerCAmelCase ( *__snake_case : Any , **__snake_case : List[str] ) -> str:
requires_backends(__snake_case , ['torch'] )
def _lowerCAmelCase ( *__snake_case : int , **__snake_case : Tuple ) -> Union[str, Any]:
requires_backends(__snake_case , ['torch'] )
def _lowerCAmelCase ( *__snake_case : List[Any] , **__snake_case : Optional[int] ) -> Any:
requires_backends(__snake_case , ['torch'] )
def _lowerCAmelCase ( *__snake_case : Optional[Any] , **__snake_case : int ) -> Tuple:
requires_backends(__snake_case , ['torch'] )
def _lowerCAmelCase ( *__snake_case : Any , **__snake_case : List[Any] ) -> Union[str, Any]:
requires_backends(__snake_case , ['torch'] )
class SCREAMING_SNAKE_CASE (metaclass=a__ ):
lowerCAmelCase = ['''torch''']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
class SCREAMING_SNAKE_CASE (metaclass=a__ ):
lowerCAmelCase = ['''torch''']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
class SCREAMING_SNAKE_CASE (metaclass=a__ ):
lowerCAmelCase = ['''torch''']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
class SCREAMING_SNAKE_CASE (metaclass=a__ ):
lowerCAmelCase = ['''torch''']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
class SCREAMING_SNAKE_CASE (metaclass=a__ ):
lowerCAmelCase = ['''torch''']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
class SCREAMING_SNAKE_CASE (metaclass=a__ ):
lowerCAmelCase = ['''torch''']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
class SCREAMING_SNAKE_CASE (metaclass=a__ ):
lowerCAmelCase = ['''torch''']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
class SCREAMING_SNAKE_CASE (metaclass=a__ ):
lowerCAmelCase = ['''torch''']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
class SCREAMING_SNAKE_CASE (metaclass=a__ ):
lowerCAmelCase = ['''torch''']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
class SCREAMING_SNAKE_CASE (metaclass=a__ ):
lowerCAmelCase = ['''torch''']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
class SCREAMING_SNAKE_CASE (metaclass=a__ ):
lowerCAmelCase = ['''torch''']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
class SCREAMING_SNAKE_CASE (metaclass=a__ ):
lowerCAmelCase = ['''torch''']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
class SCREAMING_SNAKE_CASE (metaclass=a__ ):
lowerCAmelCase = ['''torch''']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
class SCREAMING_SNAKE_CASE (metaclass=a__ ):
lowerCAmelCase = ['''torch''']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
class SCREAMING_SNAKE_CASE (metaclass=a__ ):
lowerCAmelCase = ['''torch''']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
class SCREAMING_SNAKE_CASE (metaclass=a__ ):
lowerCAmelCase = ['''torch''']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
class SCREAMING_SNAKE_CASE (metaclass=a__ ):
lowerCAmelCase = ['''torch''']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
class SCREAMING_SNAKE_CASE (metaclass=a__ ):
lowerCAmelCase = ['''torch''']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
class SCREAMING_SNAKE_CASE (metaclass=a__ ):
lowerCAmelCase = ['''torch''']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
class SCREAMING_SNAKE_CASE (metaclass=a__ ):
lowerCAmelCase = ['''torch''']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
class SCREAMING_SNAKE_CASE (metaclass=a__ ):
lowerCAmelCase = ['''torch''']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
class SCREAMING_SNAKE_CASE (metaclass=a__ ):
lowerCAmelCase = ['''torch''']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
class SCREAMING_SNAKE_CASE (metaclass=a__ ):
lowerCAmelCase = ['''torch''']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
class SCREAMING_SNAKE_CASE (metaclass=a__ ):
lowerCAmelCase = ['''torch''']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
class SCREAMING_SNAKE_CASE (metaclass=a__ ):
lowerCAmelCase = ['''torch''']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
class SCREAMING_SNAKE_CASE (metaclass=a__ ):
lowerCAmelCase = ['''torch''']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
class SCREAMING_SNAKE_CASE (metaclass=a__ ):
lowerCAmelCase = ['''torch''']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
class SCREAMING_SNAKE_CASE (metaclass=a__ ):
lowerCAmelCase = ['''torch''']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
class SCREAMING_SNAKE_CASE (metaclass=a__ ):
lowerCAmelCase = ['''torch''']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
class SCREAMING_SNAKE_CASE (metaclass=a__ ):
lowerCAmelCase = ['''torch''']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
class SCREAMING_SNAKE_CASE (metaclass=a__ ):
lowerCAmelCase = ['''torch''']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
class SCREAMING_SNAKE_CASE (metaclass=a__ ):
lowerCAmelCase = ['''torch''']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
class SCREAMING_SNAKE_CASE (metaclass=a__ ):
lowerCAmelCase = ['''torch''']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
class SCREAMING_SNAKE_CASE (metaclass=a__ ):
lowerCAmelCase = ['''torch''']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
class SCREAMING_SNAKE_CASE (metaclass=a__ ):
lowerCAmelCase = ['''torch''']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
class SCREAMING_SNAKE_CASE (metaclass=a__ ):
lowerCAmelCase = ['''torch''']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
class SCREAMING_SNAKE_CASE (metaclass=a__ ):
lowerCAmelCase = ['''torch''']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
class SCREAMING_SNAKE_CASE (metaclass=a__ ):
lowerCAmelCase = ['''torch''']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
class SCREAMING_SNAKE_CASE (metaclass=a__ ):
lowerCAmelCase = ['''torch''']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch'])
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(cls , ['torch']) | 8 |
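# The repetition above is transformers' "dummy object" pattern: every torch-backed
# class gets a placeholder that raises a helpful ImportError when torch is absent.
# A self-contained sketch of the mechanism (simplified stand-ins for illustration,
# not the real library internals):
def requires_backends(obj, backends):
    name = obj.__name__ if isinstance(obj, type) else obj.__class__.__name__
    raise ImportError(f'{name} requires the following backends: {backends}')

class DummyObject(type):
    # intercept any public attribute access on the class and fail loudly
    def __getattribute__(cls, key):
        if key.startswith('_'):
            return super().__getattribute__(key)
        requires_backends(cls, cls._backends)

class AutoModel(metaclass=DummyObject):
    _backends = ['torch']

# AutoModel.from_pretrained('gpt2')  # would raise: AutoModel requires ... ['torch']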
'''simple docstring'''
from __future__ import annotations
import math
class SegmentTree:
    def __init__(self, size):
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size )]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size )]
        self.flag = [0 for i in range(0, 4 * size )]  # flag for lazy update
    def left(self, idx):
        return idx * 2
    def right(self, idx):
        return idx * 2 + 1
    def build(self, idx, left_element, right_element, a):
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx ), left_element, mid, a)
            self.build(self.right(idx ), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx )], self.segment_tree[self.right(idx )])
    def update(self, idx, left_element, right_element, a, b, val):
        # push any pending assignment down before visiting this node
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx )] = self.lazy[idx]
                self.lazy[self.right(idx )] = self.lazy[idx]
                self.flag[self.left(idx )] = True
                self.flag[self.right(idx )] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx )] = val
                self.lazy[self.right(idx )] = val
                self.flag[self.left(idx )] = True
                self.flag[self.right(idx )] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx ), left_element, mid, a, b, val)
        self.update(self.right(idx ), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx )], self.segment_tree[self.right(idx )])
        return True
    def query(self, idx, left_element, right_element, a, b):
        # push any pending assignment down before reading this node
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx )] = self.lazy[idx]
                self.lazy[self.right(idx )] = self.lazy[idx]
                self.flag[self.left(idx )] = True
                self.flag[self.right(idx )] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx ), left_element, mid, a, b)
        q2 = self.query(self.right(idx ), mid + 1, right_element, a, b)
        return max(q1, q2)
    def __str__(self):
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])
if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 1_11)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 2_35)
print(segt) | 8 | 1 |
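# With lazy propagation, both range assignment (update) and range-max (query) run
# in O(log n): a pending write is parked in `lazy`/`flag` at a covering node and
# only pushed one level down the next time that node is visited.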
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase__ : Any = logging.get_logger(__name__)
lowercase__ : str = {
'''facebook/xmod-base''': '''https://huggingface.co/facebook/xmod-base/resolve/main/config.json''',
'''facebook/xmod-large-prenorm''': '''https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json''',
'''facebook/xmod-base-13-125k''': '''https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-125k''': '''https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-195k''': '''https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json''',
'''facebook/xmod-base-60-125k''': '''https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json''',
'''facebook/xmod-base-60-265k''': '''https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json''',
'''facebook/xmod-base-75-125k''': '''https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json''',
'''facebook/xmod-base-75-269k''': '''https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = '''xmod'''
def __init__( self , _UpperCAmelCase=3_0522 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=1 , _UpperCAmelCase=0 , _UpperCAmelCase=2 , _UpperCAmelCase="absolute" , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=False , _UpperCAmelCase=2 , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=("en_XX",) , _UpperCAmelCase=None , **_UpperCAmelCase , ):
'''simple docstring'''
super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase)
__A : Optional[int] = vocab_size
__A : List[Any] = hidden_size
__A : Tuple = num_hidden_layers
__A : List[Any] = num_attention_heads
__A : Optional[Any] = hidden_act
__A : Optional[int] = intermediate_size
__A : Dict = hidden_dropout_prob
__A : Union[str, Any] = attention_probs_dropout_prob
__A : List[Any] = max_position_embeddings
__A : Optional[int] = type_vocab_size
__A : int = initializer_range
__A : Optional[Any] = layer_norm_eps
__A : Any = position_embedding_type
__A : Tuple = use_cache
__A : Dict = classifier_dropout
__A : Union[str, Any] = pre_norm
__A : str = adapter_reduction_factor
__A : Tuple = adapter_layer_norm
__A : str = adapter_reuse_layer_norm
__A : Dict = ln_before_adapter
__A : str = list(_UpperCAmelCase)
__A : Optional[int] = default_language
class SCREAMING_SNAKE_CASE (a__ ):
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
if self.task == "multiple-choice":
__A : Union[str, Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__A : List[Any] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
]) | 8 |
'''simple docstring'''
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
__A : Dict = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
# formula for sum of series
return total
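# Illustrative cross-check (not part of the original file): the same arithmetic
# series summed term by term; for first_term=1, common_diff=1, num_of_terms=10
# both give 1 + 2 + ... + 10 = 55.
def arithmetic_series_sum_naive(first_term: float, common_diff: float, num_of_terms: int) -> float:
    return sum(first_term + i * common_diff for i in range(num_of_terms))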
def main() -> None:
print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 | 1 |
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
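# Illustrative usage (assumption: this input-stream wrapper backs the public
# `Dataset.from_generator` API of the `datasets` library):
#
#     from datasets import Dataset
#     def gen():
#         yield {"text": "a"}
#         yield {"text": "b"}
#     ds = Dataset.from_generator(gen)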
class SCREAMING_SNAKE_CASE (a__ ):
def __init__( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = None , _UpperCAmelCase = None , **_UpperCAmelCase , ):
'''simple docstring'''
super().__init__(
features=_UpperCAmelCase , cache_dir=_UpperCAmelCase , keep_in_memory=_UpperCAmelCase , streaming=_UpperCAmelCase , num_proc=_UpperCAmelCase , **_UpperCAmelCase , )
__A : Optional[Any] = Generator(
cache_dir=_UpperCAmelCase , features=_UpperCAmelCase , generator=_UpperCAmelCase , gen_kwargs=_UpperCAmelCase , **_UpperCAmelCase , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
if self.streaming:
__A : Tuple = self.builder.as_streaming_dataset(split='train')
# Build regular (map-style) dataset
else:
__A : str = None
__A : List[Any] = None
__A : Optional[Any] = None
__A : str = None
self.builder.download_and_prepare(
download_config=_UpperCAmelCase , download_mode=_UpperCAmelCase , verification_mode=_UpperCAmelCase , base_path=_UpperCAmelCase , num_proc=self.num_proc , )
__A : List[Any] = self.builder.as_dataset(
split='train' , verification_mode=_UpperCAmelCase , in_memory=self.keep_in_memory)
return dataset | 8 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , ):
'''simple docstring'''
__A : Optional[int] = parent
__A : str = 13
__A : List[Any] = 7
__A : List[str] = True
__A : str = True
__A : Optional[Any] = True
__A : int = True
__A : Dict = 99
__A : Dict = 384
__A : Any = 2
__A : int = 4
__A : Optional[Any] = 37
__A : Optional[int] = 'gelu'
__A : Dict = 0.1
__A : Optional[int] = 0.1
__A : Any = 512
__A : int = 16
__A : List[str] = 2
__A : str = 0.02
__A : Any = 3
__A : str = 4
__A : Union[str, Any] = 128
__A : int = 2
__A : List[Any] = 9
__A : List[Any] = 1
__A : List[Any] = None
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__A : str = None
if self.use_input_mask:
__A : List[Any] = random_attention_mask([self.batch_size, self.seq_length])
__A : Optional[Any] = None
if self.use_token_type_ids:
__A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__A : Optional[int] = None
__A : List[str] = None
__A : Dict = None
if self.use_labels:
__A : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__A : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__A : str = ids_tensor([self.batch_size] , self.num_choices)
__A : List[Any] = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_UpperCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : int = TFConvBertModel(config=_UpperCAmelCase)
__A : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__A : Tuple = [input_ids, input_mask]
__A : Any = model(_UpperCAmelCase)
__A : Dict = model(_UpperCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : str = TFConvBertForMaskedLM(config=_UpperCAmelCase)
__A : str = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : str = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Optional[int] = self.num_labels
__A : Any = TFConvBertForSequenceClassification(config=_UpperCAmelCase)
__A : Optional[Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : Dict = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Tuple = self.num_choices
__A : List[str] = TFConvBertForMultipleChoice(config=_UpperCAmelCase)
__A : int = tf.tile(tf.expand_dims(_UpperCAmelCase , 1) , (1, self.num_choices, 1))
__A : Optional[Any] = tf.tile(tf.expand_dims(_UpperCAmelCase , 1) , (1, self.num_choices, 1))
__A : List[Any] = tf.tile(tf.expand_dims(_UpperCAmelCase , 1) , (1, self.num_choices, 1))
__A : int = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__A : Optional[Any] = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : List[Any] = self.num_labels
__A : List[Any] = TFConvBertForTokenClassification(config=_UpperCAmelCase)
__A : str = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : int = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Optional[Any] = TFConvBertForQuestionAnswering(config=_UpperCAmelCase)
__A : Any = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : Union[str, Any] = model(_UpperCAmelCase)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = self.prepare_config_and_inputs()
        __A ,__A ,__A ,__A ,__A ,__A ,__A : Union[str, Any] = config_and_inputs
__A : List[str] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
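# Illustrative helper (not part of the original tests): the multiple-choice
# branch above repeats each (batch, seq_len) tensor once per answer choice,
# yielding shape (batch, num_choices, seq_len).
def _tile_for_choices(x, num_choices):
    import tensorflow as tf  # guarded in the real tests by is_tf_available()

    return tf.tile(tf.expand_dims(x, 1), (1, num_choices, 1))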
@require_tf
class SCREAMING_SNAKE_CASE (a__ , a__ , unittest.TestCase ):
lowerCAmelCase = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCAmelCase = (
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = TFConvBertModelTester(self)
__A : str = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase)
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__A : List[str] = True
__A : List[str] = True
if hasattr(_UpperCAmelCase , 'use_cache'):
__A : List[Any] = True
__A : str = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length)
__A : Union[str, Any] = getattr(self.model_tester , 'key_length' , _UpperCAmelCase)
for model_class in self.all_model_classes:
__A : List[str] = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase)
__A : Optional[int] = model_class(_UpperCAmelCase)
__A : Optional[Any] = len(model(_UpperCAmelCase))
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_UpperCAmelCase , saved_model=_UpperCAmelCase)
__A : Union[str, Any] = os.path.join(_UpperCAmelCase , 'saved_model' , '1')
__A : Tuple = tf.keras.models.load_model(_UpperCAmelCase)
__A : str = model(_UpperCAmelCase)
if self.is_encoder_decoder:
__A : Optional[int] = outputs['encoder_hidden_states']
__A : str = outputs['encoder_attentions']
else:
__A : List[Any] = outputs['hidden_states']
__A : Optional[Any] = outputs['attentions']
self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase)
__A : str = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase)
self.assertListEqual(
list(output_hidden_states[0].shape[-2:]) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(output_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
self.assertIsNotNone(_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__A : Any = True
__A : str = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length)
__A : Any = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length)
__A : int = getattr(self.model_tester , 'key_length' , _UpperCAmelCase)
__A : Tuple = getattr(self.model_tester , 'key_length' , _UpperCAmelCase)
def check_decoder_attentions_output(_UpperCAmelCase):
__A : List[str] = len(_UpperCAmelCase)
self.assertEqual(out_len % 2 , 0)
__A : Any = outputs.decoder_attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(_UpperCAmelCase):
__A : str = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__A : Dict = True
__A : Any = False
__A : str = model_class(_UpperCAmelCase)
__A : List[str] = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : List[str] = len(_UpperCAmelCase)
self.assertEqual(config.output_hidden_states , _UpperCAmelCase)
check_encoder_attentions_output(_UpperCAmelCase)
if self.is_encoder_decoder:
__A : Union[str, Any] = model_class(_UpperCAmelCase)
__A : int = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
self.assertEqual(config.output_hidden_states , _UpperCAmelCase)
check_decoder_attentions_output(_UpperCAmelCase)
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__A : int = True
__A : Tuple = model_class(_UpperCAmelCase)
__A : Dict = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
self.assertEqual(config.output_hidden_states , _UpperCAmelCase)
check_encoder_attentions_output(_UpperCAmelCase)
# Check attention is always last and order is fine
__A : Any = True
__A : str = True
__A : Union[str, Any] = model_class(_UpperCAmelCase)
__A : Union[str, Any] = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_UpperCAmelCase))
self.assertEqual(model.config.output_hidden_states , _UpperCAmelCase)
check_encoder_attentions_output(_UpperCAmelCase)
@require_tf
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
__A : str = tf.constant([[0, 1, 2, 3, 4, 5]])
__A : Optional[int] = model(_UpperCAmelCase)[0]
__A : List[Any] = [1, 6, 768]
self.assertEqual(output.shape , _UpperCAmelCase)
__A : Tuple = tf.constant(
[
[
[-0.03475493, -0.4686034, -0.30638832],
[0.22637248, -0.26988646, -0.7423424],
[0.10324868, -0.45013508, -0.58280784],
]
])
tf.debugging.assert_near(output[:, :3, :3] , _UpperCAmelCase , atol=1e-4) | 8 | 1 |
'''simple docstring'''
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
lowercase__ : Union[str, Any] = logging.get_logger(__name__)
def _lowerCAmelCase ( __snake_case : List[Any]=None , __snake_case : Union[str, Any]=None ) -> List[str]:
return field(default_factory=lambda: default , metadata=__snake_case )
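# Illustrative check (not part of the original file; `list_field` is the name
# the field declarations below use for the helper above): every instance gets
# its own copy of the default list rather than sharing one mutable object.
def _list_field_demo() -> bool:
    @dataclass
    class _Demo:
        xs: List[int] = list_field(default=[8])

    a, b = _Demo(), _Demo()
    return a.xs == b.xs == [8] and a.xs is not b.xs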
@dataclass
class SCREAMING_SNAKE_CASE :
lowerCAmelCase = list_field(
default=[] , metadata={
'''help''': (
'''Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version'''
''' of all available models'''
)
} , )
lowerCAmelCase = list_field(
default=[8] , metadata={'''help''': '''List of batch sizes for which memory and time performance will be evaluated'''} )
lowerCAmelCase = list_field(
default=[8, 32, 128, 512] , metadata={'''help''': '''List of sequence lengths for which memory and time performance will be evaluated'''} , )
lowerCAmelCase = field(
default=a__ , metadata={'''help''': '''Whether to benchmark inference of model. Inference can be disabled via --no-inference.'''} , )
lowerCAmelCase = field(
default=a__ , metadata={'''help''': '''Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'''} , )
lowerCAmelCase = field(
default=a__ , metadata={'''help''': '''Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'''} )
lowerCAmelCase = field(default=a__ , metadata={'''help''': '''Use FP16 to accelerate inference.'''} )
lowerCAmelCase = field(default=a__ , metadata={'''help''': '''Benchmark training of model'''} )
lowerCAmelCase = field(default=a__ , metadata={'''help''': '''Verbose memory tracing'''} )
lowerCAmelCase = field(
default=a__ , metadata={'''help''': '''Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'''} , )
lowerCAmelCase = field(
default=a__ , metadata={
'''help''': '''Whether to perform memory measurements. Memory measurements can be disabled via --no-memory'''
} , )
lowerCAmelCase = field(default=a__ , metadata={'''help''': '''Trace memory line by line'''} )
lowerCAmelCase = field(default=a__ , metadata={'''help''': '''Save result to a CSV file'''} )
lowerCAmelCase = field(default=a__ , metadata={'''help''': '''Save all print statements in a log file'''} )
lowerCAmelCase = field(default=a__ , metadata={'''help''': '''Whether to print environment information'''} )
lowerCAmelCase = field(
default=a__ , metadata={
'''help''': (
'''Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use'''
''' multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled'''
''' for debugging / testing and on TPU.'''
)
} , )
lowerCAmelCase = field(
default=f'''inference_time_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving time results to csv.'''} , )
lowerCAmelCase = field(
default=f'''inference_memory_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving memory results to csv.'''} , )
lowerCAmelCase = field(
default=f'''train_time_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving time results to csv for training.'''} , )
lowerCAmelCase = field(
default=f'''train_memory_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving memory results to csv for training.'''} , )
lowerCAmelCase = field(
default=f'''env_info_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving environment information.'''} , )
lowerCAmelCase = field(
default=f'''log_{round(time() )}.csv''' , metadata={'''help''': '''Log filename used if print statements are saved in log.'''} , )
lowerCAmelCase = field(default=3 , metadata={'''help''': '''Times an experiment will be run.'''} )
lowerCAmelCase = field(
default=a__ , metadata={
'''help''': (
'''Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain'''
''' model weights.'''
)
} , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
warnings.warn(
F'The class {self.__class__} is deprecated. Hugging Face Benchmarking utils'
' are deprecated in general and it is advised to use external Benchmarking libraries '
' to benchmark Transformer models.' , _UpperCAmelCase , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return json.dumps(dataclasses.asdict(self) , indent=2)
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
if len(self.models) <= 0:
raise ValueError(
                'Please make sure you provide at least one model name / model identifier, *e.g.* `--models'
                ' bert-base-cased` or `args.models = [\'bert-base-cased\']`.')
return self.models
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
if not self.multi_process:
return False
elif self.is_tpu:
logger.info('Multiprocessing is currently not possible on TPU.')
return False
else:
return True | 8 |
'''simple docstring'''
import argparse
import os
import re
lowercase__ : Optional[int] = '''src/diffusers'''
# Pattern that looks at the indentation in a line.
lowercase__ : Dict = re.compile(r'''^(\s*)\S''')
# Pattern that matches `"key":" and puts `key` in group 0.
lowercase__ : List[str] = re.compile(r'''^\s*"([^"]+)":''')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
lowercase__ : Tuple = re.compile(r'''^\s*_import_structure\["([^"]+)"\]''')
# Pattern that matches `"key",` and puts `key` in group 0.
lowercase__ : str = re.compile(r'''^\s*"([^"]+)",\s*$''')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
lowercase__ : str = re.compile(r'''\[([^\]]+)\]''')
def _lowerCAmelCase ( __snake_case : str ) -> Tuple:
__A : List[Any] = _re_indent.search(__snake_case )
return "" if search is None else search.groups()[0]
def _lowerCAmelCase ( __snake_case : Optional[Any] , __snake_case : str="" , __snake_case : Any=None , __snake_case : List[Any]=None ) -> Optional[int]:
__A : Tuple = 0
__A : Optional[int] = code.split('\n' )
if start_prompt is not None:
while not lines[index].startswith(__snake_case ):
index += 1
__A : Optional[int] = ['\n'.join(lines[:index] )]
else:
__A : Any = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
__A : Tuple = [lines[index]]
index += 1
while index < len(__snake_case ) and (end_prompt is None or not lines[index].startswith(__snake_case )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(__snake_case ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
current_block.append(lines[index] )
blocks.append('\n'.join(__snake_case ) )
if index < len(__snake_case ) - 1:
__A : Union[str, Any] = [lines[index + 1]]
index += 1
else:
__A : Union[str, Any] = []
else:
blocks.append('\n'.join(__snake_case ) )
__A : Optional[Any] = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(__snake_case ) > 0:
blocks.append('\n'.join(__snake_case ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(__snake_case ):
blocks.append('\n'.join(lines[index:] ) )
return blocks
def _lowerCAmelCase ( __snake_case : List[Any] ) -> int:
def _inner(__snake_case : List[Any] ):
return key(__snake_case ).lower().replace('_' , '' )
return _inner
def _lowerCAmelCase ( __snake_case : Dict , __snake_case : Any=None ) -> List[Any]:
# If no key is provided, we use a noop.
def noop(__snake_case : List[Any] ):
return x
if key is None:
__A : Optional[Any] = noop
# Constants are all uppercase, they go first.
__A : str = [obj for obj in objects if key(__snake_case ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
__A : List[str] = [obj for obj in objects if key(__snake_case )[0].isupper() and not key(__snake_case ).isupper()]
# Functions begin with a lowercase, they go last.
__A : str = [obj for obj in objects if not key(__snake_case )[0].isupper()]
__A : Tuple = ignore_underscore(__snake_case )
return sorted(__snake_case , key=__snake_case ) + sorted(__snake_case , key=__snake_case ) + sorted(__snake_case , key=__snake_case )
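# Illustrative ordering (not part of the original script; `sort_objects` is the
# name later call sites use for the helper above): constants come first, then
# classes, then functions, each sorted case-insensitively ignoring underscores:
#     sort_objects(["load_tool", "OBJ_NAMES", "ToolBox", "_helper"])
#       -> ["OBJ_NAMES", "ToolBox", "_helper", "load_tool"]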
def _lowerCAmelCase ( __snake_case : Optional[int] ) -> Tuple:
# This inner function sort imports between [ ].
def _replace(__snake_case : Tuple ):
__A : List[str] = match.groups()[0]
if "," not in imports:
return f'[{imports}]'
__A : int = [part.strip().replace('"' , '' ) for part in imports.split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
__A : Dict = keys[:-1]
return "[" + ", ".join([f'"{k}"' for k in sort_objects(__snake_case )] ) + "]"
__A : List[Any] = import_statement.split('\n' )
if len(__snake_case ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
__A : Optional[int] = 2 if lines[1].strip() == '[' else 1
__A : Any = [(i, _re_strip_line.search(__snake_case ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
__A : Optional[int] = sort_objects(__snake_case , key=lambda __snake_case : x[1] )
__A : Any = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(__snake_case ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
__A : Union[str, Any] = _re_bracket_content.sub(_replace , lines[1] )
else:
__A : Dict = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
__A : Tuple = keys[:-1]
__A : List[Any] = get_indent(lines[1] ) + ', '.join([f'"{k}"' for k in sort_objects(__snake_case )] )
return "\n".join(__snake_case )
else:
# Finally we have to deal with imports fitting on one line
__A : Optional[Any] = _re_bracket_content.sub(_replace , __snake_case )
return import_statement
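# Illustrative single-line case (not part of the original script): the bracket
# contents of a one-line import statement are sorted in place, e.g.
#     _import_structure["models.bert"] = ["BertModel", "BERT_CONSTANT", "load_bert"]
# becomes
#     _import_structure["models.bert"] = ["BERT_CONSTANT", "BertModel", "load_bert"]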
def _lowerCAmelCase ( __snake_case : List[Any] , __snake_case : List[Any]=True ) -> Optional[Any]:
with open(__snake_case , 'r' ) as f:
__A : Dict = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
__A : str = split_code_in_indented_blocks(
__snake_case , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(__snake_case ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
__A : Tuple = main_blocks[block_idx]
__A : int = block.split('\n' )
# Get to the start of the imports.
__A : Tuple = 0
while line_idx < len(__snake_case ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
__A : Optional[int] = len(__snake_case )
else:
line_idx += 1
if line_idx >= len(__snake_case ):
continue
# Ignore beginning and last line: they don't contain anything.
__A : Dict = '\n'.join(block_lines[line_idx:-1] )
__A : int = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
__A : Optional[int] = split_code_in_indented_blocks(__snake_case , indent_level=__snake_case )
# We have two categories of import key: list or _import_structure[key].append/extend
__A : Any = _re_direct_key if '_import_structure' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
__A : Dict = [(pattern.search(__snake_case ).groups()[0] if pattern.search(__snake_case ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
__A : Optional[Any] = [(i, key) for i, key in enumerate(__snake_case ) if key is not None]
__A : Tuple = [x[0] for x in sorted(__snake_case , key=lambda __snake_case : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
__A : str = 0
__A : Any = []
for i in range(len(__snake_case ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
__A : str = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(__snake_case )
count += 1
# And we put our main block back together with its first and last line.
__A : int = '\n'.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(__snake_case ):
if check_only:
return True
else:
print(f'Overwriting {file}.' )
with open(__snake_case , 'w' ) as f:
f.write('\n'.join(__snake_case ) )
def _lowerCAmelCase ( __snake_case : int=True ) -> Optional[Any]:
__A : Tuple = []
for root, _, files in os.walk(__snake_case ):
if "__init__.py" in files:
__A : List[Any] = sort_imports(os.path.join(__snake_case , '__init__.py' ) , check_only=__snake_case )
if result:
__A : Dict = [os.path.join(__snake_case , '__init__.py' )]
if len(__snake_case ) > 0:
raise ValueError(f'Would overwrite {len(__snake_case )} files, run `make style`.' )
if __name__ == "__main__":
lowercase__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
lowercase__ : Union[str, Any] = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only) | 8 | 1 |
'''simple docstring'''
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    def brightness(c: int) -> float:
        return 128 + level + (c - 128)
if not -255.0 <= level <= 255.0:
raise ValueError('level must be between -255.0 (black) and 255.0 (white)' )
    return img.point(brightness)
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save('''image_data/lena_brightness.png''', format='''png''') | 8 |
'''simple docstring'''
def perfect(number: int) -> bool:
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
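# Worked examples (illustrative): 6 and 28 are perfect numbers, since
# 6 = 1 + 2 + 3 and 28 = 1 + 2 + 4 + 7 + 14; 12 is not (1 + 2 + 3 + 4 + 6 = 16).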
if __name__ == "__main__":
print('''Program to check whether a number is a Perfect number or not...''')
    number = int(input('''Enter number: ''').strip())
print(f"""{number} is {"" if perfect(number) else "not "}a Perfect Number.""") | 8 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
class SCREAMING_SNAKE_CASE (a__ ):
def __init__( self , _UpperCAmelCase=1 , _UpperCAmelCase=0 , _UpperCAmelCase=2 , _UpperCAmelCase=512 , _UpperCAmelCase="cls" , _UpperCAmelCase=False , _UpperCAmelCase=True , **_UpperCAmelCase , ):
'''simple docstring'''
super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase)
__A : Any = project_dim
__A : List[str] = pooler_fn
__A : Tuple = learn_encoder
__A : Optional[int] = use_attention_mask
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = [r'''pooler''', r'''logit_scale''']
lowerCAmelCase = [r'''position_ids''', r'''predictions.decoder.bias''']
lowerCAmelCase = '''roberta'''
lowerCAmelCase = RobertaSeriesConfig
def __init__( self , _UpperCAmelCase):
'''simple docstring'''
super().__init__(_UpperCAmelCase)
__A : List[str] = XLMRobertaModel(_UpperCAmelCase)
__A : List[str] = nn.Linear(config.hidden_size , config.project_dim)
__A : List[Any] = getattr(_UpperCAmelCase , 'has_pre_transformation' , _UpperCAmelCase)
if self.has_pre_transformation:
__A : Tuple = nn.Linear(config.hidden_size , config.project_dim)
__A : Optional[int] = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps)
self.post_init()
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , ):
'''simple docstring'''
__A : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
__A : Any = self.base_model(
input_ids=_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , head_mask=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , output_attentions=_UpperCAmelCase , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=_UpperCAmelCase , )
if self.has_pre_transformation:
__A : Optional[int] = outputs['hidden_states'][-2]
__A : Dict = self.pre_LN(_UpperCAmelCase)
__A : Optional[Any] = self.transformation_pre(_UpperCAmelCase)
return TransformationModelOutput(
projection_state=_UpperCAmelCase , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
else:
__A : Optional[Any] = self.transformation(outputs.last_hidden_state)
return TransformationModelOutput(
projection_state=_UpperCAmelCase , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , ) | 8 |
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : Tuple = logging.get_logger(__name__)
lowercase__ : str = [
['''attention''', '''attn'''],
['''encoder_attention''', '''encoder_attn'''],
['''q_lin''', '''q_proj'''],
['''k_lin''', '''k_proj'''],
['''v_lin''', '''v_proj'''],
['''out_lin''', '''out_proj'''],
['''norm_embeddings''', '''layernorm_embedding'''],
['''position_embeddings''', '''embed_positions'''],
['''embeddings''', '''embed_tokens'''],
['''ffn.lin''', '''fc'''],
]
def _lowerCAmelCase ( __snake_case : List[Any] ) -> Tuple:
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
__A : Optional[Any] = k.replace(__snake_case , __snake_case )
if k.startswith('encoder' ):
__A : Any = k.replace('.attn' , '.self_attn' )
__A : Any = k.replace('norm1' , 'self_attn_layer_norm' )
__A : str = k.replace('norm2' , 'final_layer_norm' )
elif k.startswith('decoder' ):
__A : Tuple = k.replace('norm1' , 'self_attn_layer_norm' )
__A : str = k.replace('norm2' , 'encoder_attn_layer_norm' )
__A : int = k.replace('norm3' , 'final_layer_norm' )
return k
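# Illustrative renames (not part of the original script), applying PATTERNS plus
# the encoder/decoder fix-ups above:
#     "encoder.attention.q_lin.weight" -> "encoder.self_attn.q_proj.weight"
#     "decoder.norm3.bias"             -> "decoder.final_layer_norm.bias"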
def _lowerCAmelCase ( __snake_case : List[Any] ) -> Dict:
__A : Optional[int] = [
'model.encoder.layernorm_embedding.weight',
'model.encoder.layernorm_embedding.bias',
'model.decoder.layernorm_embedding.weight',
'model.decoder.layernorm_embedding.bias',
]
for k in keys:
__A : Tuple = sd.pop(__snake_case )
__A : Union[str, Any] = k.replace('layernorm_embedding' , 'layer_norm' )
assert new_k not in sd
__A : str = v
lowercase__ : Tuple = ['''START''']
@torch.no_grad()
def _lowerCAmelCase ( __snake_case : Dict , __snake_case : Any , __snake_case : List[Any] ) -> int:
__A : List[str] = torch.load(__snake_case , map_location='cpu' )
__A : Tuple = model['model']
__A : str = BlenderbotConfig.from_json_file(__snake_case )
__A : int = BlenderbotForConditionalGeneration(__snake_case )
__A : List[Any] = m.model.state_dict().keys()
__A : Optional[int] = []
__A : Optional[int] = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
__A : Union[str, Any] = rename_state_dict_key(__snake_case )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
__A : Optional[Any] = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(__snake_case )
m.model.load_state_dict(__snake_case , strict=__snake_case )
m.half()
m.save_pretrained(__snake_case )
if __name__ == "__main__":
lowercase__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--src_path''', type=str, help='''like blenderbot-model.bin''')
parser.add_argument('''--save_dir''', default='''hf_blenderbot''', type=str, help='''Where to save converted model.''')
parser.add_argument(
'''--hf_config_json''', default='''blenderbot-3b-config.json''', type=str, help='''Path to config to use'''
)
lowercase__ : Optional[Any] = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json) | 8 | 1 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
lowercase__ : Union[str, Any] = logging.get_logger(__name__)
lowercase__ : List[str] = {
'''google/umt5-small''': '''https://huggingface.co/google/umt5-small/resolve/main/config.json''',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = '''umt5'''
lowerCAmelCase = ['''past_key_values''']
def __init__( self , _UpperCAmelCase=25_0112 , _UpperCAmelCase=512 , _UpperCAmelCase=64 , _UpperCAmelCase=1024 , _UpperCAmelCase=8 , _UpperCAmelCase=None , _UpperCAmelCase=6 , _UpperCAmelCase=32 , _UpperCAmelCase=128 , _UpperCAmelCase=0.1 , _UpperCAmelCase=1e-6 , _UpperCAmelCase=1.0 , _UpperCAmelCase="gated-gelu" , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase="T5Tokenizer" , _UpperCAmelCase=True , _UpperCAmelCase=0 , _UpperCAmelCase=1 , _UpperCAmelCase=0 , **_UpperCAmelCase , ):
'''simple docstring'''
super().__init__(
is_encoder_decoder=_UpperCAmelCase , tokenizer_class=_UpperCAmelCase , tie_word_embeddings=_UpperCAmelCase , pad_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , decoder_start_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
__A : List[str] = vocab_size
__A : List[Any] = d_model
__A : Union[str, Any] = d_kv
__A : Dict = d_ff
__A : Tuple = num_layers
__A : Any = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__A : List[Any] = num_heads
__A : Any = relative_attention_num_buckets
__A : Tuple = relative_attention_max_distance
__A : Optional[int] = dropout_rate
__A : List[Any] = layer_norm_epsilon
__A : Optional[int] = initializer_factor
__A : str = feed_forward_proj
__A : int = use_cache
__A : str = self.feed_forward_proj.split('-')
__A : Dict = act_info[-1]
__A : str = act_info[0] == 'gated'
if len(_UpperCAmelCase) > 1 and act_info[0] != "gated" or len(_UpperCAmelCase) > 2:
raise ValueError(
F'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'')
if feed_forward_proj == "gated-gelu":
__A : Tuple = 'gelu_new'
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return self.d_model
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return self.num_heads
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return self.num_layers
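# Illustrative sketch (not part of the original file): the `feed_forward_proj`
# parsing done in __init__ above, reduced to a standalone helper.
def _parse_feed_forward_proj(value: str):
    parts = value.split('-')
    # e.g. "gated-gelu" -> ("gelu", True), later remapped to "gelu_new";
    #      "relu"       -> ("relu", False)
    return parts[-1], parts[0] == 'gated'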
class SCREAMING_SNAKE_CASE (a__ ):
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = {
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
if self.use_past:
__A : int = 'past_encoder_sequence + sequence'
__A : List[str] = {0: 'batch'}
__A : Dict = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
__A : List[Any] = {0: 'batch', 1: 'decoder_sequence'}
__A : Optional[Any] = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(_UpperCAmelCase , direction='inputs')
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return 13
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return 5e-4 | 8 |
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None):
'''simple docstring'''
__A : List[Any] = list(poly_a or [0])[:]
__A : Optional[int] = list(poly_b or [0])[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
__A : Union[str, Any] = len(self.polyA)
while self.polyB[-1] == 0:
self.polyB.pop()
__A : Optional[int] = len(self.polyB)
# Add 0 to make lengths equal a power of 2
__A : Optional[Any] = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))
while len(self.polyA) < self.c_max_length:
self.polyA.append(0)
while len(self.polyB) < self.c_max_length:
self.polyB.append(0)
# A complex root used for the fourier transform
__A : str = complex(mpmath.root(x=1 , n=self.c_max_length , k=1))
# The product
__A : Tuple = self.__multiply()
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A : Optional[int] = [[x] for x in self.polyA] if which == 'A' else [[x] for x in self.polyB]
# Corner case
if len(_UpperCAmelCase) <= 1:
return dft[0]
#
__A : Dict = self.c_max_length // 2
while next_ncol > 0:
__A : Optional[Any] = [[] for i in range(_UpperCAmelCase)]
__A : Tuple = self.root**next_ncol
# First half of next step
__A : Optional[Any] = 1
for j in range(self.c_max_length // (next_ncol * 2)):
for i in range(_UpperCAmelCase):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
current_root *= root
# Second half of next step
__A : List[str] = 1
for j in range(self.c_max_length // (next_ncol * 2)):
for i in range(_UpperCAmelCase):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
current_root *= root
# Update
__A : Optional[int] = new_dft
__A : Tuple = next_ncol // 2
return dft[0]
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = self.__dft('A')
__A : Optional[Any] = self.__dft('B')
__A : str = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0]) <= 1:
return inverce_c[0]
# Inverse DFT
__A : Dict = 2
while next_ncol <= self.c_max_length:
__A : Optional[int] = [[] for i in range(_UpperCAmelCase)]
__A : Any = self.root ** (next_ncol // 2)
__A : Tuple = 1
# First half of next step
for j in range(self.c_max_length // next_ncol):
for i in range(next_ncol // 2):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2)
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root))
current_root *= root
# Update
__A : int = new_inverse_c
next_ncol *= 2
# Unpack
__A : Optional[int] = [round(x[0].real , 8) + round(x[0].imag , 8) * 1j for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self):
'''simple docstring'''
        __A : int = 'A = ' + ' + '.join(
            F'{coef}*x^{i}' for i, coef in enumerate(self.polyA[: self.len_A]))
        __A : Optional[Any] = 'B = ' + ' + '.join(
            F'{coef}*x^{i}' for i, coef in enumerate(self.polyB[: self.len_B]))
        __A : str = 'A*B = ' + ' + '.join(
            F'{coef}*x^{i}' for i, coef in enumerate(self.product))
return F'{a}\n{b}\n{c}'
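# Illustrative cross-check (not part of the original file): schoolbook O(n^2)
# polynomial multiplication, which should match the real parts of the FFT-based
# product above up to floating-point rounding.
def naive_poly_multiply(poly_a, poly_b):
    result = [0] * (len(poly_a) + len(poly_b) - 1)
    for i, coef_a in enumerate(poly_a):
        for j, coef_b in enumerate(poly_b):
            result[i + j] += coef_a * coef_b
    return result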
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 | 1 |
'''simple docstring'''
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def _lowerCAmelCase ( __snake_case : Optional[int] ) -> str: # picklable for multiprocessing
return i + 1
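# Illustrative usage (assumption -- mirrors the datasets.parallel API exercised
# below): inside the context manager, map_nested fans work out to the backend.
#
#     with parallel_backend("spark"):
#         map_nested(lambda i: i + 1, [1, 2, 3], num_proc=2)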
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def _lowerCAmelCase ( ) -> List[str]:
with parallel_backend('spark' ):
assert ParallelBackendConfig.backend_name == "spark"
__A : Any = [1, 2, 3]
with pytest.raises(__snake_case ):
with parallel_backend('unsupported backend' ):
map_nested(__snake_case , __snake_case , num_proc=2 )
with pytest.raises(__snake_case ):
with parallel_backend('unsupported backend' ):
map_nested(__snake_case , __snake_case , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('num_proc' , [2, -1] )
def _lowerCAmelCase ( __snake_case : int ) -> Tuple:
__A : Optional[Any] = [1, 2]
__A : int = {'a': 1, 'b': 2}
__A : Dict = {'a': [1, 2], 'b': [3, 4]}
__A : Optional[int] = {'a': {'1': 1}, 'b': 2}
__A : List[Any] = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
__A : List[Any] = [2, 3]
__A : Tuple = {'a': 2, 'b': 3}
__A : List[Any] = {'a': [2, 3], 'b': [4, 5]}
__A : List[Any] = {'a': {'1': 2}, 'b': 3}
__A : List[Any] = {'a': 2, 'b': 3, 'c': 4, 'd': 5}
with parallel_backend('spark' ):
assert map_nested(__snake_case , __snake_case , num_proc=__snake_case ) == expected_map_nested_sa
assert map_nested(__snake_case , __snake_case , num_proc=__snake_case ) == expected_map_nested_sa
assert map_nested(__snake_case , __snake_case , num_proc=__snake_case ) == expected_map_nested_sa
assert map_nested(__snake_case , __snake_case , num_proc=__snake_case ) == expected_map_nested_sa
assert map_nested(__snake_case , __snake_case , num_proc=__snake_case ) == expected_map_nested_sa | 8 |
'''simple docstring'''
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=[30, 30] , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=10 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=None , _UpperCAmelCase=8 , _UpperCAmelCase=10 , ):
'''simple docstring'''
__A : Union[str, Any] = parent
__A : Tuple = batch_size
__A : List[str] = image_size
__A : Dict = patch_size
__A : Optional[Any] = num_channels
__A : Tuple = is_training
__A : Dict = use_labels
__A : List[Any] = hidden_size
__A : Tuple = num_hidden_layers
__A : int = num_attention_heads
__A : Optional[int] = intermediate_size
__A : Tuple = hidden_act
__A : Any = hidden_dropout_prob
__A : Optional[Any] = attention_probs_dropout_prob
__A : List[Any] = type_sequence_label_size
__A : List[Any] = initializer_range
__A : Optional[int] = num_labels
__A : List[Any] = scope
__A : Any = n_targets
__A : Union[str, Any] = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
__A : List[str] = (image_size[1] // patch_size) * (image_size[0] // patch_size)
__A : int = num_patches + 1 + self.num_detection_tokens
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])
__A : Tuple = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
__A : List[Any] = []
for i in range(self.batch_size):
__A : Optional[int] = {}
__A : Union[str, Any] = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=_UpperCAmelCase)
__A : str = torch.rand(self.n_targets , 4 , device=_UpperCAmelCase)
labels.append(_UpperCAmelCase)
__A : Any = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Any = YolosModel(config=_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
__A : Dict = model(_UpperCAmelCase)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Any = YolosForObjectDetection(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
__A : str = model(pixel_values=_UpperCAmelCase)
__A : List[str] = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4))
__A : Union[str, Any] = model(pixel_values=_UpperCAmelCase , labels=_UpperCAmelCase)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = self.prepare_config_and_inputs()
__A ,__A ,__A : Tuple = config_and_inputs
__A : Tuple = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE (a__ , a__ , unittest.TestCase ):
lowerCAmelCase = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
lowerCAmelCase = (
{'''feature-extraction''': YolosModel, '''object-detection''': YolosForObjectDetection} if is_torch_available() else {}
)
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False):
'''simple docstring'''
__A : Optional[Any] = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase)
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
__A : Any = []
for i in range(self.model_tester.batch_size):
__A : Tuple = {}
__A : Tuple = torch.ones(
size=(self.model_tester.n_targets,) , device=_UpperCAmelCase , dtype=torch.long)
__A : Optional[Any] = torch.ones(
self.model_tester.n_targets , 4 , device=_UpperCAmelCase , dtype=torch.float)
labels.append(_UpperCAmelCase)
__A : str = labels
return inputs_dict
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = YolosModelTester(self)
__A : Dict = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : Tuple = model_class(_UpperCAmelCase)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
__A : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : List[Any] = model_class(_UpperCAmelCase)
__A : str = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A : int = [*signature.parameters.keys()]
__A : List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__A : Optional[int] = True
# in YOLOS, the seq_len is different
__A : Dict = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
__A : Dict = True
__A : Dict = False
__A : Union[str, Any] = True
__A : Tuple = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : Any = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Union[str, Any] = outputs.attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__A : List[Any] = True
__A : List[str] = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : List[Any] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Optional[Any] = outputs.attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
__A : str = len(_UpperCAmelCase)
# Check attention is always last and order is fine
__A : Dict = True
__A : Dict = True
__A : Dict = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : List[str] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Union[str, Any] = 1
self.assertEqual(out_len + added_hidden_states , len(_UpperCAmelCase))
__A : Optional[Any] = outputs.attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
__A : Tuple = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : List[str] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Optional[Any] = outputs.hidden_states
__A : List[str] = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase)
# YOLOS has a different seq_length
__A : Dict = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )
__A ,__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : List[str] = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__A : Optional[int] = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*_UpperCAmelCase)
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A : List[Any] = YolosModel.from_pretrained(_UpperCAmelCase)
self.assertIsNotNone(_UpperCAmelCase)
def prepare_img():
__A : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('hustvl/yolos-small') if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = YolosForObjectDetection.from_pretrained('hustvl/yolos-small').to(_UpperCAmelCase)
__A : Any = self.default_image_processor
__A : str = prepare_img()
__A : int = image_processor(images=_UpperCAmelCase , return_tensors='pt').to(_UpperCAmelCase)
# forward pass
with torch.no_grad():
__A : str = model(inputs.pixel_values)
# verify outputs
__A : Tuple = torch.Size((1, 100, 92))
self.assertEqual(outputs.logits.shape , _UpperCAmelCase)
__A : Dict = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] , device=_UpperCAmelCase , )
__A : int = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] , device=_UpperCAmelCase)
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , _UpperCAmelCase , atol=1e-4))
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , _UpperCAmelCase , atol=1e-4))
# verify postprocessing
__A : List[str] = image_processor.post_process_object_detection(
_UpperCAmelCase , threshold=0.3 , target_sizes=[image.size[::-1]])[0]
__A : Optional[int] = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(_UpperCAmelCase)
__A : Union[str, Any] = [75, 75, 17, 63, 17]
__A : Any = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(_UpperCAmelCase)
self.assertEqual(len(results['scores']) , 5)
self.assertTrue(torch.allclose(results['scores'] , _UpperCAmelCase , atol=1e-4))
self.assertSequenceEqual(results['labels'].tolist() , _UpperCAmelCase)
        self.assertTrue(torch.allclose(results['boxes'][0, :] , _UpperCAmelCase))
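# ---- Editor's note: illustrative sketch, not part of the original test file ----
# The integration test above leans on `post_process_object_detection`. The sketch
# below shows the DETR/YOLOS-style post-processing idea in plain torch; the
# (100, 92)-shaped logits, the trailing "no object" class and the 0.3 threshold
# mirror the test, but this is a simplified stand-in, not the library code.
import torch

def postprocess_detections(logits, pred_boxes, image_size, threshold=0.3):
    probs = logits.softmax(-1)[:, :-1]          # drop the trailing "no object" class
    scores, labels = probs.max(-1)
    keep = scores > threshold
    cx, cy, w, h = pred_boxes[keep].unbind(-1)  # normalized (cx, cy, w, h) boxes
    boxes = torch.stack([cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2], dim=-1)
    height, width = image_size
    boxes = boxes * torch.tensor([width, height, width, height], dtype=boxes.dtype)
    return {'scores': scores[keep], 'labels': labels[keep], 'boxes': boxes}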
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase ):
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=4 , ):
'''simple docstring'''
__A : Optional[int] = parent
__A : List[str] = batch_size
__A : Tuple = seq_length
__A : Optional[Any] = is_training
__A : str = use_attention_mask
__A : Dict = use_token_type_ids
__A : int = use_labels
__A : Any = vocab_size
__A : List[str] = hidden_size
__A : str = num_hidden_layers
__A : Optional[Any] = num_attention_heads
__A : Any = intermediate_size
__A : Dict = hidden_act
__A : Any = hidden_dropout_prob
__A : Optional[int] = attention_probs_dropout_prob
__A : Optional[int] = max_position_embeddings
__A : Optional[int] = type_vocab_size
__A : int = type_sequence_label_size
__A : str = initializer_range
__A : Tuple = num_choices
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__A : Union[str, Any] = None
if self.use_attention_mask:
__A : Tuple = random_attention_mask([self.batch_size, self.seq_length])
__A : Dict = None
if self.use_token_type_ids:
__A : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__A : int = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = self.prepare_config_and_inputs()
__A ,__A ,__A ,__A : Any = config_and_inputs
__A : Tuple = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = self.prepare_config_and_inputs()
__A ,__A ,__A ,__A : Optional[int] = config_and_inputs
__A : Any = True
__A : str = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
__A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin , unittest.TestCase ):
    test_head_masking = True
    all_model_classes = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        '''simple docstring'''
        self.model_tester = FlaxRobertaModelTester(self)
@slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('roberta-base' , from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
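# ---- Editor's note: illustrative sketch, not part of the original test file ----
# Hedged stand-ins for the `ids_tensor` / `random_attention_mask` helpers the
# tester above imports, assuming the usual transformers test semantics (random
# token ids; masks that always attend to at least one position per row).
import numpy as np

def toy_ids_tensor(shape, vocab_size, seed=0):
    rng = np.random.default_rng(seed)
    return rng.integers(0, vocab_size, size=shape, dtype=np.int32)

def toy_random_attention_mask(shape, seed=0):
    rng = np.random.default_rng(seed)
    mask = rng.integers(0, 2, size=shape, dtype=np.int32)
    mask[:, -1] = 1  # guarantee at least one attended token per row
    return mask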
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''camembert-base''': 5_12,
}
SPIECE_UNDERLINE = '''▁'''
class CamembertTokenizerFast(PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = CamembertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"] , **kwargs , ):
        '''simple docstring'''
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False) if isinstance(mask_token , str) else mask_token
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None):
        '''simple docstring'''
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def save_vocabulary( self , save_directory , filename_prefix = None):
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.')
        if not os.path.isdir(save_directory):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file , out_vocab_file)
        return (out_vocab_file,)
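# ---- Editor's note: illustrative sketch, not part of the original module ----
# The two sequence-pair methods above encode CamemBERT's RoBERTa-style format:
#   single sequence:  <s> A </s>        pair:  <s> A </s></s> B </s>
# with token_type_ids that are always zero. A toy check (the ids are invented):
cls_id, sep_id = 0, 2
seq_a, seq_b = [10, 11], [20]
pair = [cls_id] + seq_a + [sep_id, sep_id] + seq_b + [sep_id]
assert pair == [0, 10, 11, 2, 2, 20, 2]
assert len(pair) * [0] == [0, 0, 0, 0, 0, 0, 0]  # the token_type_ids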
'''simple docstring'''
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = '''src/diffusers'''
REPO_PATH = '''.'''
# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
'''diffusers''',
os.path.join(DIFFUSERS_PATH, '''__init__.py'''),
submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue(line: str , indent: str) -> bool:
    return line.startswith(indent) or len(line) <= 1 or re.search(r'^\s*\)(\s*->.*:|:)\s*$' , line) is not None
def find_code_in_diffusers(object_name: str) -> str:
    parts = object_name.split('.' )
    i = 0
    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH , f'{module}.py' ) ):
        i += 1
        if i < len(parts):
            module = os.path.join(module , parts[i] )
    if i >= len(parts):
        raise ValueError(f'`object_name` should begin with the name of a module of diffusers but got {object_name}.' )
    with open(os.path.join(DIFFUSERS_PATH , f'{module}.py' ) , 'r' , encoding='utf-8' , newline='\n' ) as f:
        lines = f.readlines()
    # Now let's find the class / func in the code!
    indent = ''
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf'^{indent}(class|def)\s+{name}(\(|\:)' , lines[line_index] ) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1
    if line_index >= len(lines):
        raise ValueError(f' {object_name} does not match any function or class in {module}.' )
    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index] , indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1] ) <= 1:
        line_index -= 1
    code_lines = lines[start_index:line_index]
    return "".join(code_lines )
_re_copy_warning = re.compile(r'''^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)''')
_re_replace_pattern = re.compile(r'''^\s*(\S+)->(\S+)(\s+.*|$)''')
_re_fill_pattern = re.compile(r'''<FILL\s+[^>]*>''')
def get_indent(code: str) -> str:
    lines = code.split('\n' )
    idx = 0
    while idx < len(lines) and len(lines[idx] ) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r'^(\s*)\S' , lines[idx] ).groups()[0]
    return ""
def blackify(code: str) -> str:
    has_indent = len(get_indent(code) ) > 0
    if has_indent:
        code = f'class Bla:\n{code}'
    mode = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=119 , preview=True )
    result = black.format_str(code , mode=mode )
    result , _ = style_docstrings_in_code(result )
    return result[len('class Bla:\n' ) :] if has_indent else result
def is_copy_consistent(filename: str , overwrite: bool = False):
    with open(filename , 'r' , encoding='utf-8' , newline='\n' ) as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index] )
        if search is None:
            line_index += 1
            continue
        # There is some copied code here, let's retrieve the original.
        indent , object_name , replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name )
        theoretical_indent = get_indent(theoretical_code )
        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line , indent) and re.search(f'^{indent}# End copy' , line ) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1] ) <= 1:
            line_index -= 1
        observed_code_lines = lines[start_index:line_index]
        observed_code = ''.join(observed_code_lines )
        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split('\n' ) if _re_copy_warning.search(line ) is None]
        theoretical_code = '\n'.join(theoretical_code )
        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern ) > 0:
            patterns = replace_pattern.replace('with' , '' ).split(',' )
            patterns = [_re_replace_pattern.search(p ) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1 , obj2 , option = pattern.groups()
                theoretical_code = re.sub(obj1 , obj2 , theoretical_code )
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower() , obj2.lower() , theoretical_code )
                    theoretical_code = re.sub(obj1.upper() , obj2.upper() , theoretical_code )
            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code )
            theoretical_code = theoretical_code[len(lines[start_index - 1] ) :]
        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index] )
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1
    if overwrite and len(diffs ) > 0:
        # Warn the user a file has been modified.
        print(f'Detected changes, rewriting {filename}.' )
        with open(filename , 'w' , encoding='utf-8' , newline='\n' ) as f:
            f.writelines(lines )
    return diffs
def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH , '**/*.py' ) , recursive=True )
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename , overwrite )
        diffs += [f'- {filename}: copy does not match {d[0]} at line {d[1]}' for d in new_diffs]
    if not overwrite and len(diffs ) > 0:
        diff = '\n'.join(diffs )
        raise Exception(
            'Found the following copy inconsistencies:\n'
            + diff
            + '\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()
    check_copies(args.fix_and_overwrite)
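# ---- Editor's note: worked example, not part of the original script ----
# What the regexes above capture on an invented "Copied from" line:
#   "    # Copied from diffusers.models.attention.Attention with Attention->CrossAttention"
# _re_copy_warning.search(...).groups() yields
#   indent          -> "    "
#   object_name     -> "models.attention.Attention"
#   replace_pattern -> "with Attention->CrossAttention"
# and _re_replace_pattern then splits "Attention->CrossAttention" into the
# (obj1, obj2, option) triple that drives the re.sub calls in is_copy_consistent.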
'''simple docstring'''
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
lowercase__ : Any = '''hf-internal-testing/tiny-random-bert'''
lowercase__ : Optional[Any] = os.path.join(TRANSFORMERS_CACHE, '''models--hf-internal-testing--tiny-random-bert''')
lowercase__ : List[Any] = '''9b8c223d42b2188cb49d29af482996f9d0f3e5a6'''
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase)
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(_UpperCAmelCase))
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(_UpperCAmelCase , _UpperCAmelCase)))
with open(os.path.join(_UpperCAmelCase , 'refs' , 'main')) as f:
__A : Any = f.read()
self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , 'snapshots' , _UpperCAmelCase , _UpperCAmelCase))
self.assertTrue(os.path.isfile(_UpperCAmelCase))
# File is cached at the same place the second time.
__A : Tuple = cached_file(_UpperCAmelCase , _UpperCAmelCase)
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase)
# Using a specific revision to test the full commit hash.
__A : List[Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='9b8c223')
self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , 'snapshots' , _UpperCAmelCase , _UpperCAmelCase))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid model identifier'):
__A : Dict = cached_file('tiny-random-bert' , _UpperCAmelCase)
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid git identifier'):
__A : Optional[int] = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='aaaa')
with self.assertRaisesRegex(_UpperCAmelCase , 'does not appear to have a file named'):
__A : int = cached_file(_UpperCAmelCase , 'conf')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with self.assertRaisesRegex(_UpperCAmelCase , 'does not appear to have a file named'):
__A : Any = cached_file(_UpperCAmelCase , 'conf')
with open(os.path.join(_UpperCAmelCase , 'refs' , 'main')) as f:
__A : Dict = f.read()
self.assertTrue(os.path.isfile(os.path.join(_UpperCAmelCase , '.no_exist' , _UpperCAmelCase , 'conf')))
__A : List[Any] = cached_file(_UpperCAmelCase , 'conf' , _raise_exceptions_for_missing_entries=_UpperCAmelCase)
self.assertIsNone(_UpperCAmelCase)
__A : str = cached_file(_UpperCAmelCase , 'conf' , local_files_only=_UpperCAmelCase , _raise_exceptions_for_missing_entries=_UpperCAmelCase)
self.assertIsNone(_UpperCAmelCase)
__A : List[str] = mock.Mock()
__A : Dict = 500
__A : List[str] = {}
__A : List[Any] = HTTPError
__A : Optional[Any] = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=_UpperCAmelCase) as mock_head:
__A : Dict = cached_file(_UpperCAmelCase , 'conf' , _raise_exceptions_for_connection_errors=_UpperCAmelCase)
self.assertIsNone(_UpperCAmelCase)
# This check we did call the fake head request
mock_head.assert_called()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.assertTrue(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase))
self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase))
self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.assertIsNone(get_file_from_repo('bert-base-cased' , 'ahah.txt'))
# The function raises if the repository does not exist.
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid model identifier'):
get_file_from_repo('bert-base-case' , _UpperCAmelCase)
# The function raises if the revision does not exist.
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid git identifier'):
get_file_from_repo('bert-base-cased' , _UpperCAmelCase , revision='ahaha')
__A : List[str] = get_file_from_repo('bert-base-cased' , _UpperCAmelCase)
# The name is the cached name which is not very easy to test, so instead we load the content.
__A : List[str] = json.loads(open(_UpperCAmelCase , 'r').read())
self.assertEqual(config['hidden_size'] , 768)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
__A : Tuple = Path(_UpperCAmelCase) / 'a.txt'
filename.touch()
self.assertEqual(get_file_from_repo(_UpperCAmelCase , 'a.txt') , str(_UpperCAmelCase))
            self.assertIsNone(get_file_from_repo(_UpperCAmelCase , 'b.txt'))
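# ---- Editor's note: illustrative sketch, not part of the original tests ----
# The tests above exercise the hub cache layout that `cached_file` resolves:
#   <cache>/models--<org>--<name>/refs/<revision>        -> commit hash
#   <cache>/models--<org>--<name>/snapshots/<hash>/<file>
# An offline toy of that resolution against a fake cache (all names invented):
import tempfile
from pathlib import Path

with tempfile.TemporaryDirectory() as cache:
    repo = Path(cache) / 'models--demo--tiny'
    commit = '0123456789abcdef'
    (repo / 'refs').mkdir(parents=True)
    (repo / 'snapshots' / commit).mkdir(parents=True)
    (repo / 'refs' / 'main').write_text(commit)
    (repo / 'snapshots' / commit / 'config.json').write_text('{}')
    resolved = (repo / 'refs' / 'main').read_text()
    assert (repo / 'snapshots' / resolved / 'config.json').is_file()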
'''simple docstring'''
import enum
import shutil
import sys
TERMINAL_WIDTH, _ = shutil.get_terminal_size()
CURSOR_TO_CHAR = {'''UP''': '''A''', '''DOWN''': '''B''', '''RIGHT''': '''C''', '''LEFT''': '''D'''}
class Direction(enum.Enum ):
    UP = 0
    DOWN = 1
def forceWrite(content , end="" ):
    sys.stdout.write(str(content ) + end )
    sys.stdout.flush()
def writeColor(content , color , end="" ):
    forceWrite(f'\u001b[{color}m{content}\u001b[0m' , end )
def reset_cursor():
    forceWrite('\r' )
def move_cursor(num_lines , direction ):
    forceWrite(f'\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}' )
def clear_line():
    forceWrite(' ' * TERMINAL_WIDTH )
    reset_cursor()
def linebreak():
    reset_cursor()
    forceWrite('-' * TERMINAL_WIDTH )
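# ---- Editor's note: illustrative sketch, not part of the original module ----
# The helpers above are thin wrappers over ANSI escape sequences: move_cursor(2,
# 'UP') writes "\033[2A", and writeColor wraps text in SGR colour codes.
import sys
sys.stdout.write('\u001b[32mhello\u001b[0m\n')  # "hello" in green, then reset
sys.stdout.write('\033[1A')                     # cursor up one line, as move_cursor(1, 'UP') does
sys.stdout.write('\033[1B')                     # ...and back down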
'''simple docstring'''
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def save_randomly_initialized_version(config_name: str , save_dir: str , **config_kwargs ):
    config = AutoConfig.from_pretrained(config_name , **config_kwargs )
    model = AutoModelForSeqaSeqLM.from_config(config )
    model.save_pretrained(save_dir )
    AutoTokenizer.from_pretrained(config_name ).save_pretrained(save_dir )
    return model
if __name__ == "__main__":
    fire.Fire(save_randomly_initialized_version)
'''simple docstring'''
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path , config_file , pytorch_dump_path ):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file )
    print(f'Building PyTorch model from configuration: {config}' )
    model = LxmertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/tapas-base-finetuned-sqa''': (
'''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wtq''': (
'''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wikisql-supervised''': (
'''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-tabfact''': (
'''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'''
),
}
class TapasConfig(PretrainedConfig ):
    model_type = '''tapas'''
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1024 , type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10] , initializer_range=0.02 , layer_norm_eps=1e-1_2 , pad_token_id=0 , positive_label_weight=10.0 , num_aggregation_labels=0 , aggregation_loss_weight=1.0 , use_answer_as_supervision=None , answer_loss_importance=1.0 , use_normalized_answer_loss=False , huber_loss_delta=None , temperature=1.0 , aggregation_temperature=1.0 , use_gumbel_for_cells=False , use_gumbel_for_aggregation=False , average_approximation_function="ratio" , cell_selection_preference=None , answer_loss_cutoff=None , max_num_rows=64 , max_num_columns=32 , average_logits_per_cell=False , select_one_column=True , allow_empty_column_selection=False , init_cell_selection_weights_to_zero=False , reset_position_index_per_cell=True , disable_per_token_loss=False , aggregation_labels=None , no_aggregation_label_index=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , **kwargs)
        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss
        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index
        if isinstance(self.aggregation_labels , dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
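# ---- Editor's note: illustrative sketch, not part of the original module ----
# Why the final branch above casts keys to int: a config round-tripped through
# JSON comes back with string keys. E.g. for the four WTQ aggregation heads:
aggregation_labels = {'0': 'NONE', '1': 'SUM', '2': 'AVERAGE', '3': 'COUNT'}
normalised = {int(k): v for k, v in aggregation_labels.items()}
assert normalised[3] == 'COUNT'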
'''simple docstring'''
def solution(n: int = 600_851_475_143 ) -> int:
    try:
        n = int(n )
    except (TypeError, ValueError):
        raise TypeError('Parameter n must be int or castable to int.' )
    if n <= 0:
        raise ValueError('Parameter n must be greater than or equal to one.' )
    ans = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    if n > 1:
        ans = n
    return int(ans )
if __name__ == "__main__":
    print(f"""{solution() = }""")
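# ---- Editor's note: worked example, not part of the original module ----
# Tracing the trial-division loop above on n = 13195 (= 5 * 7 * 13 * 29):
# i = 5 strips the factor 5, i = 7 strips 7, i = 13 strips 13, and the leftover
# n = 29 > 1 becomes the answer, so solution(13195) returns 29.
assert solution(13195) == 29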
'''simple docstring'''
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge :
    def __init__( self , short_edge_length , max_size=sys.maxsize):
        '''simple docstring'''
        self.interp_method = 'bilinear'
        self.max_size = max_size
        self.short_edge_length = short_edge_length
    def __call__( self , imgs):
        '''simple docstring'''
        img_augs = []
        for img in imgs:
            h , w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h , w)
            if h < w:
                newh , neww = size, scale * w
            else:
                newh , neww = scale * h, size
            if max(newh , neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh , neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)
            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2 , 0 , 1).unsqueeze(0) # 3, 0, 1) # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img , (newh, neww) , mode=self.interp_method , align_corners=False).squeeze(0)
            img_augs.append(img)
        return img_augs
class Preprocess :
    def __init__( self , cfg):
        '''simple docstring'''
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std
    def pad( self , images):
        '''simple docstring'''
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
            for size, im in zip(image_sizes , images)
        ]
        return torch.stack(images), torch.tensor(image_sizes)
    def __call__( self , images , single_image=False):
        '''simple docstring'''
        with torch.no_grad():
            if not isinstance(images , list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i] , torch.Tensor):
                    images.insert(i , images.pop(i).to(self.device).float())
                elif not isinstance(images[i] , torch.Tensor):
                    images.insert(
                        i , torch.as_tensor(img_tensorize(images.pop(i) , input_format=self.input_format))
                        .to(self.device)
                        .float() , )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images , sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes , sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box(boxes , scale_yx ):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes
def _clip_box(tensor , box_size: Tuple[int, int] ):
    assert torch.isfinite(tensor ).all(), "Box tensor contains infinite or NaN!"
    h , w = box_size
    tensor[:, 0].clamp_(min=0 , max=w )
    tensor[:, 1].clamp_(min=0 , max=h )
    tensor[:, 2].clamp_(min=0 , max=w )
    tensor[:, 3].clamp_(min=0 , max=h )
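# ---- Editor's note: illustrative sketch, not part of the original module ----
# How _scale_box applies the per-image (scale_y, scale_x) pair: x coordinates
# (columns 0 and 2) use the x scale, y coordinates (columns 1 and 3) the y scale.
import torch

boxes = torch.tensor([[10.0, 20.0, 110.0, 220.0]])
scale_yx = torch.tensor([[0.5, 0.25]])  # (scale_y, scale_x)
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
assert boxes.tolist() == [[2.5, 10.0, 27.5, 110.0]]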
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase ):
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=7 , _UpperCAmelCase=3 , _UpperCAmelCase=30 , _UpperCAmelCase=400 , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=[0.5, 0.5, 0.5] , _UpperCAmelCase=[0.5, 0.5, 0.5] , _UpperCAmelCase=True , _UpperCAmelCase=1 / 255 , _UpperCAmelCase=True , ):
'''simple docstring'''
__A : int = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
__A : Dict = parent
__A : Union[str, Any] = batch_size
__A : List[str] = num_channels
__A : str = min_resolution
__A : Optional[int] = max_resolution
__A : Optional[int] = do_resize
__A : Optional[int] = size
__A : Any = do_normalize
__A : Dict = image_mean
__A : Optional[Any] = image_std
__A : Optional[int] = do_rescale
__A : List[Any] = rescale_factor
__A : int = do_pad
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase=False):
'''simple docstring'''
if not batched:
__A : List[Any] = image_inputs[0]
if isinstance(_UpperCAmelCase , Image.Image):
__A ,__A : Any = image.size
else:
__A ,__A : List[Any] = image.shape[1], image.shape[2]
if w < h:
__A : Union[str, Any] = int(self.size['shortest_edge'] * h / w)
__A : Dict = self.size['shortest_edge']
elif w > h:
__A : Any = self.size['shortest_edge']
__A : Optional[Any] = int(self.size['shortest_edge'] * w / h)
else:
__A : Union[str, Any] = self.size['shortest_edge']
__A : str = self.size['shortest_edge']
else:
__A : Tuple = []
for image in image_inputs:
__A ,__A : int = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
__A : Union[str, Any] = max(_UpperCAmelCase , key=lambda _UpperCAmelCase: item[0])[0]
__A : int = max(_UpperCAmelCase , key=lambda _UpperCAmelCase: item[1])[1]
return expected_height, expected_width
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE (a__ , unittest.TestCase ):
lowerCAmelCase = DeformableDetrImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = DeformableDetrImageProcessingTester(self)
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(_UpperCAmelCase , 'image_mean'))
self.assertTrue(hasattr(_UpperCAmelCase , 'image_std'))
self.assertTrue(hasattr(_UpperCAmelCase , 'do_normalize'))
self.assertTrue(hasattr(_UpperCAmelCase , 'do_resize'))
self.assertTrue(hasattr(_UpperCAmelCase , 'do_rescale'))
self.assertTrue(hasattr(_UpperCAmelCase , 'do_pad'))
self.assertTrue(hasattr(_UpperCAmelCase , 'size'))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333})
self.assertEqual(image_processor.do_pad , _UpperCAmelCase)
__A : int = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_UpperCAmelCase)
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84})
self.assertEqual(image_processor.do_pad , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
__A : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase)
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image)
# Test not batched input
__A : Optional[int] = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
__A ,__A : Optional[int] = self.image_processor_tester.get_expected_values(_UpperCAmelCase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A ,__A : int = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase)
__A : Dict = image_processing(_UpperCAmelCase , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
__A : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase)
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray)
# Test not batched input
__A : Any = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
__A ,__A : Union[str, Any] = self.image_processor_tester.get_expected_values(_UpperCAmelCase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A : Tuple = image_processing(_UpperCAmelCase , return_tensors='pt').pixel_values
__A ,__A : Tuple = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
__A : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase)
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor)
# Test not batched input
__A : Dict = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
__A ,__A : Optional[int] = self.image_processor_tester.get_expected_values(_UpperCAmelCase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A : str = image_processing(_UpperCAmelCase , return_tensors='pt').pixel_values
__A ,__A : Tuple = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r') as f:
__A : Dict = json.loads(f.read())
__A : str = {'image_id': 3_9769, 'annotations': target}
# encode them
__A : List[Any] = DeformableDetrImageProcessor()
__A : Optional[int] = image_processing(images=_UpperCAmelCase , annotations=_UpperCAmelCase , return_tensors='pt')
# verify pixel values
__A : List[str] = torch.Size([1, 3, 800, 1066])
self.assertEqual(encoding['pixel_values'].shape , _UpperCAmelCase)
__A : Tuple = torch.tensor([0.2796, 0.3138, 0.3481])
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _UpperCAmelCase , atol=1e-4))
# verify area
__A : Tuple = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _UpperCAmelCase))
# verify boxes
__A : Tuple = torch.Size([6, 4])
self.assertEqual(encoding['labels'][0]['boxes'].shape , _UpperCAmelCase)
__A : Any = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _UpperCAmelCase , atol=1e-3))
# verify image_id
__A : Tuple = torch.tensor([3_9769])
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _UpperCAmelCase))
# verify is_crowd
__A : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _UpperCAmelCase))
# verify class_labels
__A : Optional[Any] = torch.tensor([75, 75, 63, 65, 17, 17])
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _UpperCAmelCase))
# verify orig_size
__A : Any = torch.tensor([480, 640])
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _UpperCAmelCase))
# verify size
__A : Dict = torch.tensor([800, 1066])
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _UpperCAmelCase))
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r') as f:
__A : Optional[int] = json.loads(f.read())
__A : int = {'file_name': '000000039769.png', 'image_id': 3_9769, 'segments_info': target}
__A : Tuple = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic')
# encode them
__A : str = DeformableDetrImageProcessor(format='coco_panoptic')
__A : Any = image_processing(images=_UpperCAmelCase , annotations=_UpperCAmelCase , masks_path=_UpperCAmelCase , return_tensors='pt')
# verify pixel values
__A : str = torch.Size([1, 3, 800, 1066])
self.assertEqual(encoding['pixel_values'].shape , _UpperCAmelCase)
__A : List[str] = torch.tensor([0.2796, 0.3138, 0.3481])
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _UpperCAmelCase , atol=1e-4))
# verify area
__A : Any = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _UpperCAmelCase))
# verify boxes
__A : Optional[int] = torch.Size([6, 4])
self.assertEqual(encoding['labels'][0]['boxes'].shape , _UpperCAmelCase)
__A : Optional[Any] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _UpperCAmelCase , atol=1e-3))
# verify image_id
__A : List[Any] = torch.tensor([3_9769])
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _UpperCAmelCase))
# verify is_crowd
__A : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _UpperCAmelCase))
# verify class_labels
__A : List[str] = torch.tensor([17, 17, 63, 75, 75, 93])
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _UpperCAmelCase))
# verify masks
__A : Dict = 82_2873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _UpperCAmelCase)
# verify orig_size
__A : Optional[int] = torch.tensor([480, 640])
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _UpperCAmelCase))
# verify size
__A : int = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _UpperCAmelCase))
'''simple docstring'''
def compute_ap(l ): # noqa: E741
    n = len(l )
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n
    def dfs(root , at , parent , out_edge_count ):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root , to , at , out_edge_count )
                low[at] = min(low[at] , low[to] )
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at] , to )
        return out_edge_count
    for i in range(n ):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i , i , -1 , out_edge_count )
            is_art[i] = out_edge_count > 1
    for x in range(len(is_art ) ):
        if is_art[x] is True:
            print(x )
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
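# ---- Editor's note: worked example, not part of the original module ----
# For the adjacency list above, the articulation points are 2, 3 and 5:
# removing 2 separates the triangle {0, 1}, removing 3 strands vertex 4, and
# removing 5 detaches the cycle {6, 7, 8}. compute_ap(data) therefore prints:
# 2
# 3
# 5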
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC ):
    def __init__( self , path_or_paths: NestedDataStructureLike[PathLike] = None , split: Optional[NamedSplit] = None , features: Optional[Features] = None , cache_dir: str = None , keep_in_memory: bool = False , streaming: bool = False , num_proc: Optional[int] = None , **kwargs , ):
        '''simple docstring'''
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths , dict) else 'train'
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs
    @abstractmethod
    def read( self):
        '''simple docstring'''
        pass
class AbstractDatasetInputStream(ABC ):
    def __init__( self , features: Optional[Features] = None , cache_dir: str = None , keep_in_memory: bool = False , streaming: bool = False , num_proc: Optional[int] = None , **kwargs , ):
        '''simple docstring'''
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs
    @abstractmethod
    def read( self):
        '''simple docstring'''
        pass
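# ---- Editor's note: illustrative sketch, not part of the original module ----
# A minimal concrete reader in the spirit of the abstract base classes above;
# the JSON-lines parsing is a toy stand-in, not the datasets library's reader.
import json
from abc import ABC, abstractmethod

class ToyReader(ABC):
    def __init__(self, path):
        self.path = path
    @abstractmethod
    def read(self):
        ...

class ToyJsonLinesReader(ToyReader):
    def read(self):
        with open(self.path, encoding='utf-8') as f:
            return [json.loads(line) for line in f]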
'''simple docstring'''
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn.grep_linear''': '''encoder.layers.*.attention.gru_rel_pos_linear''',
'''self_attn.relative_attention_bias''': '''encoder.layers.*.attention.rel_attn_embed''',
'''self_attn.grep_a''': '''encoder.layers.*.attention.gru_rel_pos_const''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split('.' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f' {value.shape} for {full_name}'
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def recursively_load_weights(fairseq_model , hf_model ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == 'group' , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('.' )[-2]
                        mapped_key = mapped_key.replace('*' , layer_index )
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
            if not is_used:
                unused_weights.append(name )
    logger.warning(f'Unused weights: {unused_weights}' )
def load_conv_layer(full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split('conv_layers.' )[-1]
    items = name.split('.' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path , pytorch_dump_folder_path , config_path=None ):
    # load the pre-trained checkpoints
    checkpoint = torch.load(checkpoint_path )
    cfg = WavLMConfigOrig(checkpoint['cfg'] )
    model = WavLMOrig(cfg )
    model.load_state_dict(checkpoint['model'] )
    model.eval()
    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path )
    else:
        config = WavLMConfig()
    hf_wavlm = WavLMModel(config )
    recursively_load_weights(model , hf_wavlm )
    hf_wavlm.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
lowercase__ : Tuple = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
lowercase__ : Any = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path) | 8 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_vit_msn'] = [
        'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ViTMSNModel',
        'ViTMSNForImageClassification',
        'ViTMSNPreTrainedModel',
    ]
if TYPE_CHECKING:
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 8 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNet1DOutput(BaseOutput):
    # output wrapper returned by the 1D UNet forward pass
    sample: torch.FloatTensor
class UNet1DModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        sample_size: int = 65536,
        sample_rate: Optional[int] = None,
        in_channels: int = 2,
        out_channels: int = 2,
        extra_in_channels: int = 0,
        time_embedding_type: str = 'fourier',
        flip_sin_to_cos: bool = True,
        use_timestep_embedding: bool = False,
        freq_shift: float = 0.0,
        down_block_types: Tuple[str] = ('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D'),
        up_block_types: Tuple[str] = ('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip'),
        mid_block_type: str = 'UNetMidBlock1D',
        out_block_type: str = None,
        block_out_channels: Tuple[int] = (32, 32, 64),
        act_fn: str = None,
        norm_num_groups: int = 8,
        layers_per_block: int = 1,
        downsample_each_block: bool = False,
    ):
        '''simple docstring'''
        super().__init__()
        self.sample_size = sample_size
        # time
        if time_embedding_type == 'fourier':
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos)
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == 'positional':
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift)
            timestep_input_dim = block_out_channels[0]
        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim, time_embed_dim=time_embed_dim, act_fn=act_fn, out_dim=block_out_channels[0])
        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None
        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            if i == 0:
                # extra conditioning channels are concatenated to the input of the first block only
                input_channel += extra_in_channels
            is_final_block = i == len(block_out_channels) - 1
            down_block = get_down_block(
                down_block_type, num_layers=layers_per_block, in_channels=input_channel, out_channels=output_channel, temb_channels=block_out_channels[0], add_downsample=not is_final_block or downsample_each_block, )
            self.down_blocks.append(down_block)
        # mid
        self.mid_block = get_mid_block(
            mid_block_type, in_channels=block_out_channels[-1], mid_channels=block_out_channels[-1], out_channels=block_out_channels[-1], embed_dim=block_out_channels[0], num_layers=layers_per_block, add_downsample=downsample_each_block, )
        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )
            is_final_block = i == len(block_out_channels) - 1
            up_block = get_up_block(
                up_block_type, num_layers=layers_per_block, in_channels=prev_output_channel, out_channels=output_channel, temb_channels=block_out_channels[0], add_upsample=not is_final_block, )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel
        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type, num_groups_out=num_groups_out, embed_dim=block_out_channels[0], out_channels=out_channels, act_fn=act_fn, fc_dim=block_out_channels[-1] // 4, )
    def forward(self, sample: torch.FloatTensor, timestep: Union[torch.Tensor, float, int], return_dict: bool = True) -> Union[UNet1DOutput, Tuple]:
        '''simple docstring'''
        # 1. time: promote a scalar timestep to a 1-D tensor so the projection can batch it
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)
        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            # broadcast the embedding over the sequence length so it can be combined channel-wise
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))
        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples
        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)
        # 4. up: each up block consumes the most recent skip connection from the down path
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)
        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)
        if not return_dict:
            return (sample,)
        return UNet1DOutput(sample=sample) | 8 | 1 |
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json',
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'mgp-str': 27}
class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token='[GO]', bos_token='[GO]', eos_token='[s]', pad_token='[GO]', **kwargs):
        super().__init__(
            unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        # MGP-STR tokenizes at the character level
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error('Vocabulary path ({}) should be a directory'.format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
        return (vocab_file,) | 8 |
'''simple docstring'''
def hamming_distance(string1: str, string2: str) -> int:
    """Count the positions at which two equal-length strings differ.

    >>> hamming_distance("python", "pythin")
    1
    >>> hamming_distance("karolin", "kathrin")
    3
    """
    if len(string1) != len(string2):
        raise ValueError('String lengths must match!')
    count = 0
    for chara, charb in zip(string1, string2):
        if chara != charb:
            count += 1
    return count
if __name__ == "__main__":
    import doctest
    doctest.testmod() | 8 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PATTERNS = [
    ['attention', 'attn'],
    ['encoder_attention', 'encoder_attn'],
    ['q_lin', 'q_proj'],
    ['k_lin', 'k_proj'],
    ['v_lin', 'v_proj'],
    ['out_lin', 'out_proj'],
    ['norm_embeddings', 'layernorm_embedding'],
    ['position_embeddings', 'embed_positions'],
    ['embeddings', 'embed_tokens'],
    ['ffn.lin', 'fc'],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)
    if k.startswith('encoder'):
        k = k.replace('.attn', '.self_attn')
        k = k.replace('norm1', 'self_attn_layer_norm')
        k = k.replace('norm2', 'final_layer_norm')
    elif k.startswith('decoder'):
        k = k.replace('norm1', 'self_attn_layer_norm')
        k = k.replace('norm2', 'encoder_attn_layer_norm')
        k = k.replace('norm3', 'final_layer_norm')
    return k
def rename_layernorm_keys(sd):
    keys = [
        'model.encoder.layernorm_embedding.weight',
        'model.encoder.layernorm_embedding.bias',
        'model.decoder.layernorm_embedding.weight',
        'model.decoder.layernorm_embedding.bias',
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace('layernorm_embedding', 'layer_norm')
        assert new_k not in sd
        sd[new_k] = v
IGNORE_KEYS = ['START']
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location='cpu')
    sd = model['model']
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--src_path', type=str, help='like blenderbot-model.bin')
    parser.add_argument('--save_dir', default='hf_blenderbot', type=str, help='Where to save converted model.')
    parser.add_argument(
        '--hf_config_json', default='blenderbot-3b-config.json', type=str, help='Path to config to use'
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json) | 8 |
'''simple docstring'''
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=['RobertaPreLayerNormForMaskedLM'])
    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename='pytorch_model.bin'))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith('roberta.'):
            tensor_key = 'roberta_prelayernorm.' + tensor_key[len('roberta.'):]
        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith('.self.LayerNorm.weight') or tensor_key.endswith('.self.LayerNorm.bias'):
            continue
        state_dict[tensor_key] = tensor_value
    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict)
    model.save_pretrained(pytorch_dump_folder_path)
    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--checkpoint-repo',
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path) | 8 | 1 |
'''simple docstring'''
def ugly_numbers(n: int) -> int:
    """Return the n-th ugly number (a number whose only prime factors are 2, 3 and 5)."""
    ugly_nums = [1]
    ia, ib, ic = 0, 0, 0  # indices into ugly_nums for the 2-, 3- and 5-multiples
    next_a = ugly_nums[ia] * 2
    next_b = ugly_nums[ib] * 3
    next_c = ugly_nums[ic] * 5
    for _ in range(1, n):
        next_num = min(next_a, next_b, next_c)
        ugly_nums.append(next_num)
        if next_num == next_a:
            ia += 1
            next_a = ugly_nums[ia] * 2
        if next_num == next_b:
            ib += 1
            next_b = ugly_nums[ib] * 3
        if next_num == next_c:
            ic += 1
            next_c = ugly_nums[ic] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
    from doctest import testmod
    testmod(verbose=True)
    print(f"""{ugly_numbers(200) = }""") | 8 |
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={'help': 'Whether to use SortishSampler or not.'})
    predict_with_generate: bool = field(
        default=False, metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'})
    generation_max_length: Optional[int] = field(
        default=None, metadata={
            'help': (
                'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
                'to the `max_length` value of the model configuration.'
            )
        }, )
    generation_num_beams: Optional[int] = field(
        default=None, metadata={
            'help': (
                'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
                'to the `num_beams` value of the model configuration.'
            )
        }, )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None, metadata={
            'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
        }, )

    def to_dict(self):
        # serialize nested GenerationConfig objects so the result stays JSON-friendly
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d | 8 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=7 , _UpperCAmelCase=3 , _UpperCAmelCase=30 , _UpperCAmelCase=400 , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=0.9 , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=[0.5, 0.5, 0.5] , _UpperCAmelCase=[0.5, 0.5, 0.5] , ):
'''simple docstring'''
__A : List[Any] = size if size is not None else {'shortest_edge': 30}
__A : int = crop_size if crop_size is not None else {'height': 30, 'width': 30}
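        # assumption: `size` sets the shortest-edge resize target and `crop_size` the final
        # center crop; PoolFormer's processor scales the resize step by `crop_pct`
        # (roughly size / crop_pct before cropping), following timm's eval transform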
__A : Optional[Any] = parent
__A : List[Any] = batch_size
__A : Any = num_channels
__A : Union[str, Any] = min_resolution
__A : Union[str, Any] = max_resolution
__A : int = do_resize_and_center_crop
__A : int = size
__A : List[str] = crop_pct
__A : str = crop_size
__A : Optional[Any] = do_normalize
__A : Optional[int] = image_mean
__A : Optional[Any] = image_std
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE (a__ , unittest.TestCase ):
lowerCAmelCase = PoolFormerImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = PoolFormerImageProcessingTester(self)
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(_UpperCAmelCase , 'do_resize_and_center_crop'))
self.assertTrue(hasattr(_UpperCAmelCase , 'size'))
self.assertTrue(hasattr(_UpperCAmelCase , 'crop_pct'))
self.assertTrue(hasattr(_UpperCAmelCase , 'do_normalize'))
self.assertTrue(hasattr(_UpperCAmelCase , 'image_mean'))
self.assertTrue(hasattr(_UpperCAmelCase , 'image_std'))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'shortest_edge': 30})
self.assertEqual(image_processor.crop_size , {'height': 30, 'width': 30})
__A : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84)
self.assertEqual(image_processor.size , {'shortest_edge': 42})
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84})
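        # keyword overrides passed to `from_dict` take precedence over the serialized values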
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
__A : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase)
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image)
# Test not batched input
__A : List[Any] = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
__A : Optional[int] = image_processing(_UpperCAmelCase , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
__A : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase)
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray)
# Test not batched input
__A : str = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
__A : Any = image_processing(_UpperCAmelCase , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
__A : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase)
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor)
# Test not batched input
__A : Tuple = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
__A : str = image_processing(_UpperCAmelCase , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , ) | 8 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json',
}
class LxmertConfig(PretrainedConfig):
    model_type = 'lxmert'
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {'vision': r_layers, 'cross_encoder': x_layers, 'language': l_layers}
        super().__init__(**kwargs) | 8 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=[30, 30] , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=10 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=None , _UpperCAmelCase=8 , _UpperCAmelCase=10 , ):
'''simple docstring'''
__A : Union[str, Any] = parent
__A : Tuple = batch_size
__A : List[str] = image_size
__A : Dict = patch_size
__A : Optional[Any] = num_channels
__A : Tuple = is_training
__A : Dict = use_labels
__A : List[Any] = hidden_size
__A : Tuple = num_hidden_layers
__A : int = num_attention_heads
__A : Optional[int] = intermediate_size
__A : Tuple = hidden_act
__A : Any = hidden_dropout_prob
__A : Optional[Any] = attention_probs_dropout_prob
__A : List[Any] = type_sequence_label_size
__A : List[Any] = initializer_range
__A : Optional[int] = num_labels
__A : List[Any] = scope
__A : Any = n_targets
__A : Union[str, Any] = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
__A : List[str] = (image_size[1] // patch_size) * (image_size[0] // patch_size)
__A : int = num_patches + 1 + self.num_detection_tokens
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])
__A : Tuple = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
__A : List[Any] = []
for i in range(self.batch_size):
__A : Optional[int] = {}
__A : Union[str, Any] = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=_UpperCAmelCase)
__A : str = torch.rand(self.n_targets , 4 , device=_UpperCAmelCase)
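            # DETR-style targets: boxes are normalized (center_x, center_y, width, height)
            # in [0, 1], so uniform random floats are valid stand-ins here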
labels.append(_UpperCAmelCase)
__A : Any = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Any = YolosModel(config=_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
__A : Dict = model(_UpperCAmelCase)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Any = YolosForObjectDetection(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
__A : str = model(pixel_values=_UpperCAmelCase)
__A : List[str] = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4))
__A : Union[str, Any] = model(pixel_values=_UpperCAmelCase , labels=_UpperCAmelCase)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE (a__ , a__ , unittest.TestCase ):
lowerCAmelCase = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
lowerCAmelCase = (
{'''feature-extraction''': YolosModel, '''object-detection''': YolosForObjectDetection} if is_torch_available() else {}
)
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False):
'''simple docstring'''
__A : Optional[Any] = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase)
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
__A : Any = []
for i in range(self.model_tester.batch_size):
__A : Tuple = {}
__A : Tuple = torch.ones(
size=(self.model_tester.n_targets,) , device=_UpperCAmelCase , dtype=torch.long)
__A : Optional[Any] = torch.ones(
self.model_tester.n_targets , 4 , device=_UpperCAmelCase , dtype=torch.float)
labels.append(_UpperCAmelCase)
__A : str = labels
return inputs_dict
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = YolosModelTester(self)
__A : Dict = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : Tuple = model_class(_UpperCAmelCase)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
__A : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : List[Any] = model_class(_UpperCAmelCase)
__A : str = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A : int = [*signature.parameters.keys()]
__A : List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__A : Optional[int] = True
# in YOLOS, the seq_len is different
__A : Dict = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
__A : Dict = True
__A : Dict = False
__A : Union[str, Any] = True
__A : Tuple = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : Any = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Union[str, Any] = outputs.attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__A : List[Any] = True
__A : List[str] = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : List[Any] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Optional[Any] = outputs.attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
__A : str = len(_UpperCAmelCase)
# Check attention is always last and order is fine
__A : Dict = True
__A : Dict = True
__A : Dict = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : List[str] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Union[str, Any] = 1
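                # requesting hidden states on top of attentions should add exactly one
                # element (the hidden_states tuple) to the model outputs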
self.assertEqual(out_len + added_hidden_states , len(_UpperCAmelCase))
__A : Optional[Any] = outputs.attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
__A : Tuple = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : List[str] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Optional[Any] = outputs.hidden_states
__A : List[str] = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase)
# YOLOS has a different seq_length
__A : Dict = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )
__A ,__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : List[str] = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__A : Optional[int] = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*_UpperCAmelCase)
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A : List[Any] = YolosModel.from_pretrained(_UpperCAmelCase)
self.assertIsNotNone(_UpperCAmelCase)
def prepare_img() -> "Image.Image":
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('hustvl/yolos-small') if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = YolosForObjectDetection.from_pretrained('hustvl/yolos-small').to(_UpperCAmelCase)
__A : Any = self.default_image_processor
__A : str = prepare_img()
__A : int = image_processor(images=_UpperCAmelCase , return_tensors='pt').to(_UpperCAmelCase)
# forward pass
with torch.no_grad():
__A : str = model(inputs.pixel_values)
# verify outputs
__A : Tuple = torch.Size((1, 100, 92))
self.assertEqual(outputs.logits.shape , _UpperCAmelCase)
__A : Dict = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] , device=_UpperCAmelCase , )
__A : int = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] , device=_UpperCAmelCase)
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , _UpperCAmelCase , atol=1e-4))
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , _UpperCAmelCase , atol=1e-4))
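        # the expected slices above are regression references from the released
        # hustvl/yolos-small checkpoint; atol=1e-4 guards against numeric drift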
# verify postprocessing
__A : List[str] = image_processor.post_process_object_detection(
_UpperCAmelCase , threshold=0.3 , target_sizes=[image.size[::-1]])[0]
__A : Optional[int] = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(_UpperCAmelCase)
__A : Union[str, Any] = [75, 75, 17, 63, 17]
__A : Any = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(_UpperCAmelCase)
self.assertEqual(len(results['scores']) , 5)
self.assertTrue(torch.allclose(results['scores'] , _UpperCAmelCase , atol=1e-4))
self.assertSequenceEqual(results['labels'].tolist() , _UpperCAmelCase)
self.assertTrue(torch.allclose(results['boxes'][0, :] , _UpperCAmelCase)) | 8 |
'''simple docstring'''
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    """Count the minimum number of perfect squares that sum to `number` (dynamic programming)."""
    if number != int(number):
        raise ValueError('the value of input must be a natural number')
    if number < 0:
        raise ValueError('the value of input must not be a negative number')
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
    import doctest
    doctest.testmod() | 8 | 1 |
'''simple docstring'''
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    """Greedy fractional knapsack: take items in decreasing value/weight ratio."""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)
    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
if __name__ == "__main__":
    import doctest
    doctest.testmod() | 8 | 1 |
'''simple docstring'''
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
    import accelerate
def apply_forward_hook(method):
    # only wrap the method when a recent-enough accelerate is installed
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse('0.17.0'):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, '_hf_hook') and hasattr(self._hf_hook, 'pre_forward'):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper | 8 |
'''simple docstring'''
from __future__ import annotations
import math
class SegmentTree:
    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Assign `val` to every position in [a, b] in O(log n) via lazy propagation."""
        if self.flag[idx] is True:
            # push the pending assignment down before descending
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        """Return the maximum of [a, b] in O(log n)."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])
if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt) | 8 | 1 |
'''simple docstring'''
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    """Sum of an arithmetic progression: n/2 * (2a + (n - 1)d)."""
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    return total
def main() -> None:
    print(sum_of_series(1, 1, 10))
if __name__ == "__main__":
    import doctest
    doctest.testmod() | 8 | 1 |
'''simple docstring'''
from __future__ import annotations
import queue
class TreeNode:
    def __init__(self, data):
        self.data = data
        self.right = None
        self.left = None
def build_tree() -> TreeNode:
    print('\n********Press N to stop entering at any point of time********\n')
    check = input('Enter the value of the root node: ').strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f'Enter the left node of {node_found.data}: '
        check = input(msg).strip().lower() or 'n'
        if check == 'n':
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f'Enter the right node of {node_found.data}: '
        check = input(msg).strip().lower() or 'n'
        if check == 'n':
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise  # unreachable: the loop always returns once input stops
def pre_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=',')
    pre_order(node.left)
    pre_order(node.right)
def in_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=',')
    in_order(node.right)
def post_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=',')
def level_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=',')
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)
def level_order_actual(node: TreeNode) -> None:
    # like level_order, but prints one tree level per line
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=',')
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)
def pre_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=',')
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right
def in_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=',')
        n = n.right
def post_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stacka, stackb = [], []
    n = node
    stacka.append(n)
    while stacka:  # to find the reversed order of post order, store it in stackb
        n = stacka.pop()
        if n.left:
            stacka.append(n.left)
        if n.right:
            stacka.append(n.right)
        stackb.append(n)
    while stackb:  # pop up from stackb will be the post order
        print(stackb.pop().data, end=',')
def prompt(s: str = "", width: int = 50, char: str = "*") -> str:
    if not s:
        return '\n' + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f'{left * char} {s} {(left + extra) * char}'
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(prompt('Binary Tree Traversals'))
    node: TreeNode = build_tree()
    print(prompt('Pre Order Traversal'))
    pre_order(node)
    print(prompt() + '\n')
    print(prompt('In Order Traversal'))
    in_order(node)
    print(prompt() + '\n')
    print(prompt('Post Order Traversal'))
    post_order(node)
    print(prompt() + '\n')
    print(prompt('Level Order Traversal'))
    level_order(node)
    print(prompt() + '\n')
    print(prompt('Actual Level Order Traversal'))
    level_order_actual(node)
    print('*' * 50 + '\n')
    print(prompt('Pre Order Traversal - Iteration Version'))
    pre_order_iter(node)
    print(prompt() + '\n')
    print(prompt('In Order Traversal - Iteration Version'))
    in_order_iter(node)
    print(prompt() + '\n')
    print(prompt('Post Order Traversal - Iteration Version'))
    post_order_iter(node)
    print(prompt()) | 8 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , ):
'''simple docstring'''
__A : Optional[int] = parent
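        # note: the remaining constructor arguments are deliberately shadowed by the fixed
        # small-model values below, so the tester always builds the same tiny ConvBERT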
__A : str = 13
__A : List[Any] = 7
__A : List[str] = True
__A : str = True
__A : Optional[Any] = True
__A : int = True
__A : Dict = 99
__A : Dict = 384
__A : Any = 2
__A : int = 4
__A : Optional[Any] = 37
__A : Optional[int] = 'gelu'
__A : Dict = 0.1
__A : Optional[int] = 0.1
__A : Any = 512
__A : int = 16
__A : List[str] = 2
__A : str = 0.02
__A : Any = 3
__A : str = 4
__A : Union[str, Any] = 128
__A : int = 2
__A : List[Any] = 9
__A : List[Any] = 1
__A : List[Any] = None
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__A : str = None
if self.use_input_mask:
__A : List[Any] = random_attention_mask([self.batch_size, self.seq_length])
__A : Optional[Any] = None
if self.use_token_type_ids:
__A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__A : Optional[int] = None
__A : List[str] = None
__A : Dict = None
if self.use_labels:
__A : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__A : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__A : str = ids_tensor([self.batch_size] , self.num_choices)
__A : List[Any] = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_UpperCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : int = TFConvBertModel(config=_UpperCAmelCase)
__A : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__A : Tuple = [input_ids, input_mask]
__A : Any = model(_UpperCAmelCase)
__A : Dict = model(_UpperCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : str = TFConvBertForMaskedLM(config=_UpperCAmelCase)
__A : str = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : str = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Optional[int] = self.num_labels
__A : Any = TFConvBertForSequenceClassification(config=_UpperCAmelCase)
__A : Optional[Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : Dict = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Tuple = self.num_choices
__A : List[str] = TFConvBertForMultipleChoice(config=_UpperCAmelCase)
__A : int = tf.tile(tf.expand_dims(_UpperCAmelCase , 1) , (1, self.num_choices, 1))
__A : Optional[Any] = tf.tile(tf.expand_dims(_UpperCAmelCase , 1) , (1, self.num_choices, 1))
__A : List[Any] = tf.tile(tf.expand_dims(_UpperCAmelCase , 1) , (1, self.num_choices, 1))
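        # multiple-choice heads expect inputs of shape (batch_size, num_choices, seq_len),
        # so each tensor is tiled across a new choices dimension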
__A : int = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__A : Optional[Any] = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : List[Any] = self.num_labels
__A : List[Any] = TFConvBertForTokenClassification(config=_UpperCAmelCase)
__A : str = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : int = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Optional[Any] = TFConvBertForQuestionAnswering(config=_UpperCAmelCase)
__A : Any = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : Union[str, Any] = model(_UpperCAmelCase)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = self.prepare_config_and_inputs()
__A ,__A ,__A ,__A ,__A ,__A ,__A : Union[str, Any] = config_and_inputs
__A : List[str] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE (a__ , a__ , unittest.TestCase ):
lowerCAmelCase = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCAmelCase = (
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = TFConvBertModelTester(self)
__A : str = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase)
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__A : List[str] = True
__A : List[str] = True
if hasattr(_UpperCAmelCase , 'use_cache'):
__A : List[Any] = True
__A : str = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length)
__A : Union[str, Any] = getattr(self.model_tester , 'key_length' , _UpperCAmelCase)
for model_class in self.all_model_classes:
__A : List[str] = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase)
__A : Optional[int] = model_class(_UpperCAmelCase)
__A : Optional[Any] = len(model(_UpperCAmelCase))
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_UpperCAmelCase , saved_model=_UpperCAmelCase)
__A : Union[str, Any] = os.path.join(_UpperCAmelCase , 'saved_model' , '1')
__A : Tuple = tf.keras.models.load_model(_UpperCAmelCase)
__A : str = model(_UpperCAmelCase)
if self.is_encoder_decoder:
__A : Optional[int] = outputs['encoder_hidden_states']
__A : str = outputs['encoder_attentions']
else:
__A : List[Any] = outputs['hidden_states']
__A : Optional[Any] = outputs['attentions']
self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase)
__A : str = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase)
self.assertListEqual(
list(output_hidden_states[0].shape[-2:]) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(output_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
self.assertIsNotNone(_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__A : Any = True
__A : str = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length)
__A : Any = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length)
__A : int = getattr(self.model_tester , 'key_length' , _UpperCAmelCase)
__A : Tuple = getattr(self.model_tester , 'key_length' , _UpperCAmelCase)
def check_decoder_attentions_output(_UpperCAmelCase):
__A : List[str] = len(_UpperCAmelCase)
self.assertEqual(out_len % 2 , 0)
__A : Any = outputs.decoder_attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(_UpperCAmelCase):
__A : str = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__A : Dict = True
__A : Any = False
__A : str = model_class(_UpperCAmelCase)
__A : List[str] = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : List[str] = len(_UpperCAmelCase)
self.assertEqual(config.output_hidden_states , _UpperCAmelCase)
check_encoder_attentions_output(_UpperCAmelCase)
if self.is_encoder_decoder:
__A : Union[str, Any] = model_class(_UpperCAmelCase)
__A : int = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
self.assertEqual(config.output_hidden_states , _UpperCAmelCase)
check_decoder_attentions_output(_UpperCAmelCase)
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__A : int = True
__A : Tuple = model_class(_UpperCAmelCase)
__A : Dict = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
self.assertEqual(config.output_hidden_states , _UpperCAmelCase)
check_encoder_attentions_output(_UpperCAmelCase)
# Check attention is always last and order is fine
__A : Any = True
__A : str = True
__A : Union[str, Any] = model_class(_UpperCAmelCase)
__A : Union[str, Any] = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_UpperCAmelCase))
self.assertEqual(model.config.output_hidden_states , _UpperCAmelCase)
check_encoder_attentions_output(_UpperCAmelCase)
@require_tf
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
__A : str = tf.constant([[0, 1, 2, 3, 4, 5]])
__A : Optional[int] = model(_UpperCAmelCase)[0]
__A : List[Any] = [1, 6, 768]
self.assertEqual(output.shape , _UpperCAmelCase)
__A : Tuple = tf.constant(
[
[
[-0.03475493, -0.4686034, -0.30638832],
[0.22637248, -0.26988646, -0.7423424],
[0.10324868, -0.45013508, -0.58280784],
]
])
tf.debugging.assert_near(output[:, :3, :3] , _UpperCAmelCase , atol=1e-4) | 8 | 1 |
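# --- Editor's note: illustrative sketch, not part of the dataset row above. ---
# The attention-shape assertions in the ConvBERT tests divide
# num_attention_heads by 2 because ConvBERT routes half of its heads through a
# span-based convolution branch (controlled by `head_ratio`, assumed here to be
# its default of 2), leaving heads // head_ratio conventional attention maps:
num_attention_heads = 4  # hypothetical value, for illustration only
head_ratio = 2           # assumed ConvBERT default
effective_attention_heads = num_attention_heads // head_ratio
assert effective_attention_heads == num_attention_heads / 2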
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : str = logging.get_logger(__name__)
lowercase__ : Union[str, Any] = {
'''asapp/sew-d-tiny-100k''': '''https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json''',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = '''sew-d'''
def __init__( self , _UpperCAmelCase=32 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase=2 , _UpperCAmelCase=512 , _UpperCAmelCase=256 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=("p2c", "c2p") , _UpperCAmelCase="layer_norm" , _UpperCAmelCase="gelu_python" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-7 , _UpperCAmelCase=1e-5 , _UpperCAmelCase="group" , _UpperCAmelCase="gelu" , _UpperCAmelCase=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , _UpperCAmelCase=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , _UpperCAmelCase=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , _UpperCAmelCase=False , _UpperCAmelCase=128 , _UpperCAmelCase=16 , _UpperCAmelCase=True , _UpperCAmelCase=0.05 , _UpperCAmelCase=10 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0 , _UpperCAmelCase=10 , _UpperCAmelCase=0 , _UpperCAmelCase="mean" , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=256 , _UpperCAmelCase=0 , _UpperCAmelCase=1 , _UpperCAmelCase=2 , **_UpperCAmelCase , ):
'''simple docstring'''
super().__init__(**_UpperCAmelCase , pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase)
__A : List[Any] = hidden_size
__A : Any = feat_extract_norm
__A : List[str] = feat_extract_activation
__A : Union[str, Any] = list(_UpperCAmelCase)
__A : List[str] = list(_UpperCAmelCase)
__A : Dict = list(_UpperCAmelCase)
__A : Any = conv_bias
__A : str = num_conv_pos_embeddings
__A : Optional[int] = num_conv_pos_embedding_groups
__A : Optional[Any] = len(self.conv_dim)
__A : Union[str, Any] = num_hidden_layers
__A : int = intermediate_size
__A : Union[str, Any] = squeeze_factor
__A : Tuple = max_position_embeddings
__A : Optional[Any] = position_buckets
__A : Union[str, Any] = share_att_key
__A : Union[str, Any] = relative_attention
__A : Optional[int] = norm_rel_ebd
__A : Dict = list(_UpperCAmelCase)
__A : Optional[Any] = hidden_act
__A : List[str] = num_attention_heads
__A : Union[str, Any] = hidden_dropout
__A : List[Any] = attention_dropout
__A : List[Any] = activation_dropout
__A : Dict = feat_proj_dropout
__A : str = final_dropout
__A : Dict = layer_norm_eps
__A : int = feature_layer_norm_eps
__A : int = initializer_range
__A : str = vocab_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. '
'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, '
F'but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) '
F'= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.')
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__A : Dict = apply_spec_augment
__A : Optional[Any] = mask_time_prob
__A : Union[str, Any] = mask_time_length
__A : List[str] = mask_time_min_masks
__A : Dict = mask_feature_prob
__A : List[Any] = mask_feature_length
__A : Dict = mask_feature_min_masks
# ctc loss
__A : str = ctc_loss_reduction
__A : Any = ctc_zero_infinity
# sequence classification
__A : List[str] = use_weighted_layer_sum
__A : List[Any] = classifier_proj_size
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1) | 8 |
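# --- Editor's note: illustrative sketch, not part of the dataset row above. ---
# The closing property multiplies all convolutional strides together, which is
# the feature extractor's total downsampling factor. With the default strides
# from the signature above, 16 kHz audio is reduced 320x (a 20 ms hop):
import functools
import operator

conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)  # defaults copied from above
downsampling = functools.reduce(operator.mul, conv_stride, 1)
assert downsampling == 5 * 2 ** 6 == 320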
'''simple docstring'''
import argparse
import os
import re
lowercase__ : Optional[int] = '''src/diffusers'''
# Pattern that looks at the indentation in a line.
lowercase__ : Dict = re.compile(r'''^(\s*)\S''')
# Pattern that matches `"key":" and puts `key` in group 0.
lowercase__ : List[str] = re.compile(r'''^\s*"([^"]+)":''')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
lowercase__ : Tuple = re.compile(r'''^\s*_import_structure\["([^"]+)"\]''')
# Pattern that matches `"key",` and puts `key` in group 0.
lowercase__ : str = re.compile(r'''^\s*"([^"]+)",\s*$''')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
lowercase__ : str = re.compile(r'''\[([^\]]+)\]''')
def _lowerCAmelCase ( __snake_case : str ) -> Tuple:
__A : List[Any] = _re_indent.search(__snake_case )
return "" if search is None else search.groups()[0]
def _lowerCAmelCase ( __snake_case : Optional[Any] , __snake_case : str="" , __snake_case : Any=None , __snake_case : List[Any]=None ) -> Optional[int]:
__A : Tuple = 0
__A : Optional[int] = code.split('\n' )
if start_prompt is not None:
while not lines[index].startswith(__snake_case ):
index += 1
__A : Optional[int] = ['\n'.join(lines[:index] )]
else:
__A : Any = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
__A : Tuple = [lines[index]]
index += 1
while index < len(__snake_case ) and (end_prompt is None or not lines[index].startswith(__snake_case )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(__snake_case ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
current_block.append(lines[index] )
blocks.append('\n'.join(__snake_case ) )
if index < len(__snake_case ) - 1:
__A : Union[str, Any] = [lines[index + 1]]
index += 1
else:
__A : Union[str, Any] = []
else:
blocks.append('\n'.join(__snake_case ) )
__A : Optional[Any] = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(__snake_case ) > 0:
blocks.append('\n'.join(__snake_case ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(__snake_case ):
blocks.append('\n'.join(lines[index:] ) )
return blocks
def _lowerCAmelCase ( __snake_case : List[Any] ) -> int:
def _inner(__snake_case : List[Any] ):
return key(__snake_case ).lower().replace('_' , '' )
return _inner
def _lowerCAmelCase ( __snake_case : Dict , __snake_case : Any=None ) -> List[Any]:
# If no key is provided, we use a noop.
def noop(__snake_case : List[Any] ):
return x
if key is None:
__A : Optional[Any] = noop
# Constants are all uppercase, they go first.
__A : str = [obj for obj in objects if key(__snake_case ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
__A : List[str] = [obj for obj in objects if key(__snake_case )[0].isupper() and not key(__snake_case ).isupper()]
# Functions begin with a lowercase, they go last.
__A : str = [obj for obj in objects if not key(__snake_case )[0].isupper()]
__A : Tuple = ignore_underscore(__snake_case )
return sorted(__snake_case , key=__snake_case ) + sorted(__snake_case , key=__snake_case ) + sorted(__snake_case , key=__snake_case )
def _lowerCAmelCase ( __snake_case : Optional[int] ) -> Tuple:
# This inner function sorts imports between [ ].
def _replace(__snake_case : Tuple ):
__A : List[str] = match.groups()[0]
if "," not in imports:
return f'[{imports}]'
__A : int = [part.strip().replace('"' , '' ) for part in imports.split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
__A : Dict = keys[:-1]
return "[" + ", ".join([f'"{k}"' for k in sort_objects(__snake_case )] ) + "]"
__A : List[Any] = import_statement.split('\n' )
if len(__snake_case ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
__A : Optional[int] = 2 if lines[1].strip() == '[' else 1
__A : Any = [(i, _re_strip_line.search(__snake_case ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
__A : Optional[int] = sort_objects(__snake_case , key=lambda __snake_case : x[1] )
__A : Any = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(__snake_case ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
__A : Union[str, Any] = _re_bracket_content.sub(_replace , lines[1] )
else:
__A : Dict = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
__A : Tuple = keys[:-1]
__A : List[Any] = get_indent(lines[1] ) + ', '.join([f'"{k}"' for k in sort_objects(__snake_case )] )
return "\n".join(__snake_case )
else:
# Finally we have to deal with imports fitting on one line
__A : Optional[Any] = _re_bracket_content.sub(_replace , __snake_case )
return import_statement
def _lowerCAmelCase ( __snake_case : List[Any] , __snake_case : List[Any]=True ) -> Optional[Any]:
with open(__snake_case , 'r' ) as f:
__A : Dict = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
__A : str = split_code_in_indented_blocks(
__snake_case , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(__snake_case ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
__A : Tuple = main_blocks[block_idx]
__A : int = block.split('\n' )
# Get to the start of the imports.
__A : Tuple = 0
while line_idx < len(__snake_case ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
__A : Optional[int] = len(__snake_case )
else:
line_idx += 1
if line_idx >= len(__snake_case ):
continue
# Ignore beginning and last line: they don't contain anything.
__A : Dict = '\n'.join(block_lines[line_idx:-1] )
__A : int = get_indent(block_lines[1] )
# Split the internal block into blocks of indent level 1.
__A : Optional[int] = split_code_in_indented_blocks(__snake_case , indent_level=__snake_case )
# We have two categories of import key: list or _import_structure[key].append/extend
__A : Any = _re_direct_key if '_import_structure' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
__A : Dict = [(pattern.search(__snake_case ).groups()[0] if pattern.search(__snake_case ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
__A : Optional[Any] = [(i, key) for i, key in enumerate(__snake_case ) if key is not None]
__A : Tuple = [x[0] for x in sorted(__snake_case , key=lambda __snake_case : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
__A : str = 0
__A : Any = []
for i in range(len(__snake_case ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
__A : str = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(__snake_case )
count += 1
# And we put our main block back together with its first and last line.
__A : int = '\n'.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(__snake_case ):
if check_only:
return True
else:
print(f'Overwriting {file}.' )
with open(__snake_case , 'w' ) as f:
f.write('\n'.join(__snake_case ) )
def _lowerCAmelCase ( __snake_case : int=True ) -> Optional[Any]:
__A : Tuple = []
for root, _, files in os.walk(__snake_case ):
if "__init__.py" in files:
__A : List[Any] = sort_imports(os.path.join(__snake_case , '__init__.py' ) , check_only=__snake_case )
if result:
__A : Dict = [os.path.join(__snake_case , '__init__.py' )]
if len(__snake_case ) > 0:
raise ValueError(f'Would overwrite {len(__snake_case )} files, run `make style`.' )
if __name__ == "__main__":
lowercase__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
lowercase__ : Union[str, Any] = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only) | 8 | 1 |
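# --- Editor's note: illustrative sketch of the sort order implemented above. ---
# Objects are bucketed into constants (ALL_CAPS), classes (Capitalized), and
# functions (lowercase), and each bucket is sorted with underscores ignored:
def _bucket_sort(objects):
    key = lambda name: name.lower().replace('_', '')
    constants = sorted((o for o in objects if o.isupper()), key=key)
    classes = sorted((o for o in objects if o[0].isupper() and not o.isupper()), key=key)
    functions = sorted((o for o in objects if not o[0].isupper()), key=key)
    return constants + classes + functions

assert _bucket_sort(['my_func', 'MyClass', 'SOME_CONST', 'a_func']) == [
    'SOME_CONST', 'MyClass', 'a_func', 'my_func'
]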
'''simple docstring'''
lowercase__ : Tuple = {
'''a''': '''AAAAA''',
'''b''': '''AAAAB''',
'''c''': '''AAABA''',
'''d''': '''AAABB''',
'''e''': '''AABAA''',
'''f''': '''AABAB''',
'''g''': '''AABBA''',
'''h''': '''AABBB''',
'''i''': '''ABAAA''',
'''j''': '''BBBAA''',
'''k''': '''ABAAB''',
'''l''': '''ABABA''',
'''m''': '''ABABB''',
'''n''': '''ABBAA''',
'''o''': '''ABBAB''',
'''p''': '''ABBBA''',
'''q''': '''ABBBB''',
'''r''': '''BAAAA''',
'''s''': '''BAAAB''',
'''t''': '''BAABA''',
'''u''': '''BAABB''',
'''v''': '''BBBAB''',
'''w''': '''BABAA''',
'''x''': '''BABAB''',
'''y''': '''BABBA''',
'''z''': '''BABBB''',
''' ''': ''' ''',
}
lowercase__ : Union[str, Any] = {value: key for key, value in encode_dict.items()}
def _lowerCAmelCase ( __snake_case : str ) -> str:
__A : Optional[int] = ''
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception('encode() accepts only letters of the alphabet and spaces' )
return encoded
def _lowerCAmelCase ( __snake_case : str ) -> str:
if set(__snake_case ) - {"A", "B", " "} != set():
raise Exception('decode() accepts only \'A\', \'B\' and spaces' )
__A : str = ''
for word in coded.split():
while len(__snake_case ) != 0:
decoded += decode_dict[word[:5]]
__A : Dict = word[5:]
decoded += " "
return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod() | 8 |
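# --- Editor's note: worked example for the Baconian cipher above. ---
# Each letter becomes a fixed five-character A/B code, so encoding is a table
# lookup and decoding consumes the ciphertext five characters at a time
# (assumes encode_dict/decode_dict from the snippet are in scope):
coded = ''.join(encode_dict[ch] for ch in 'hello')
assert coded == 'AABBB' 'AABAA' 'ABABA' 'ABABA' 'ABBAB'
decoded = ''.join(decode_dict[coded[i:i + 5]] for i in range(0, len(coded), 5))
assert decoded == 'hello'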
'''simple docstring'''
def _lowerCAmelCase ( __snake_case : int ) -> bool:
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print('''Program to check whether a number is a Perfect number or not...''')
lowercase__ : int = int(input('''Enter number: ''').strip())
print(f"""{number} is {"" if perfect(number) else "not "}a Perfect Number.""") | 8 | 1 |
'''simple docstring'''
def _lowerCAmelCase ( __snake_case : Optional[Any] ) -> Optional[Any]: # noqa: E741
__A : Tuple = len(__snake_case )
__A : Optional[int] = 0
__A : str = [0] * n
__A : int = [False] * n
__A : Tuple = [False] * n
def dfs(__snake_case : List[str] , __snake_case : List[Any] , __snake_case : Optional[Any] , __snake_case : int ):
if parent == root:
out_edge_count += 1
__A : str = True
__A : Tuple = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
__A : Optional[int] = dfs(__snake_case , __snake_case , __snake_case , __snake_case )
__A : int = min(low[at] , low[to] )
# AP found via bridge
if at < low[to]:
__A : Tuple = True
# AP found via cycle
if at == low[to]:
__A : Optional[Any] = True
else:
__A : Any = min(low[at] , __snake_case )
return out_edge_count
for i in range(__snake_case ):
if not visited[i]:
__A : Tuple = 0
__A : List[Any] = dfs(__snake_case , __snake_case , -1 , __snake_case )
__A : Union[str, Any] = out_edge_count > 1
for x in range(len(__snake_case ) ):
if is_art[x] is True:
print(__snake_case )
# Adjacency list of graph
lowercase__ : Tuple = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data) | 8 |
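# --- Editor's note: cross-check sketch; assumes networkx is installed. ---
# For the adjacency list above, removing vertex 2, 3, or 5 disconnects the
# graph, so those are exactly the articulation points the DFS should print:
import networkx as nx

G = nx.Graph(
    [(0, 1), (0, 2), (1, 2), (2, 3), (3, 4), (2, 5), (5, 6), (6, 7), (7, 8), (5, 8)]
)
assert sorted(nx.articulation_points(G)) == [2, 3, 5]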
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : Tuple = logging.get_logger(__name__)
lowercase__ : str = [
['''attention''', '''attn'''],
['''encoder_attention''', '''encoder_attn'''],
['''q_lin''', '''q_proj'''],
['''k_lin''', '''k_proj'''],
['''v_lin''', '''v_proj'''],
['''out_lin''', '''out_proj'''],
['''norm_embeddings''', '''layernorm_embedding'''],
['''position_embeddings''', '''embed_positions'''],
['''embeddings''', '''embed_tokens'''],
['''ffn.lin''', '''fc'''],
]
def _lowerCAmelCase ( __snake_case : List[Any] ) -> Tuple:
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
__A : Optional[Any] = k.replace(__snake_case , __snake_case )
if k.startswith('encoder' ):
__A : Any = k.replace('.attn' , '.self_attn' )
__A : Any = k.replace('norm1' , 'self_attn_layer_norm' )
__A : str = k.replace('norm2' , 'final_layer_norm' )
elif k.startswith('decoder' ):
__A : Tuple = k.replace('norm1' , 'self_attn_layer_norm' )
__A : str = k.replace('norm2' , 'encoder_attn_layer_norm' )
__A : int = k.replace('norm3' , 'final_layer_norm' )
return k
def _lowerCAmelCase ( __snake_case : List[Any] ) -> Dict:
__A : Optional[int] = [
'model.encoder.layernorm_embedding.weight',
'model.encoder.layernorm_embedding.bias',
'model.decoder.layernorm_embedding.weight',
'model.decoder.layernorm_embedding.bias',
]
for k in keys:
__A : Tuple = sd.pop(__snake_case )
__A : Union[str, Any] = k.replace('layernorm_embedding' , 'layer_norm' )
assert new_k not in sd
__A : str = v
lowercase__ : Tuple = ['''START''']
@torch.no_grad()
def _lowerCAmelCase ( __snake_case : Dict , __snake_case : Any , __snake_case : List[Any] ) -> int:
__A : List[str] = torch.load(__snake_case , map_location='cpu' )
__A : Tuple = model['model']
__A : str = BlenderbotConfig.from_json_file(__snake_case )
__A : int = BlenderbotForConditionalGeneration(__snake_case )
__A : List[Any] = m.model.state_dict().keys()
__A : Optional[int] = []
__A : Optional[int] = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
__A : Union[str, Any] = rename_state_dict_key(__snake_case )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
__A : Optional[Any] = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(__snake_case )
m.model.load_state_dict(__snake_case , strict=__snake_case )
m.half()
m.save_pretrained(__snake_case )
if __name__ == "__main__":
lowercase__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--src_path''', type=str, help='''like blenderbot-model.bin''')
parser.add_argument('''--save_dir''', default='''hf_blenderbot''', type=str, help='''Where to save converted model.''')
parser.add_argument(
'''--hf_config_json''', default='''blenderbot-3b-config.json''', type=str, help='''Path to config to use'''
)
lowercase__ : Optional[Any] = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json) | 8 | 1 |
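# --- Editor's note: illustrative sketch of the key renaming performed above. ---
# ParlAI parameter names are rewritten pattern-by-pattern, then encoder keys
# get their attention blocks renamed; e.g. for one projection weight
# (the patterns below are a subset of PATTERNS above):
patterns = [['attention', 'attn'], ['q_lin', 'q_proj']]
k = 'encoder.layers.0.attention.q_lin.weight'
for parlai_name, hf_name in patterns:
    k = k.replace(parlai_name, hf_name)
k = k.replace('.attn', '.self_attn')  # encoder-specific rule
assert k == 'encoder.layers.0.self_attn.q_proj.weight'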
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup
lowercase__ : int = '''https://www.indeed.co.in/jobs?q=mobile+app+development&l='''
def _lowerCAmelCase ( __snake_case : str = "mumbai" ) -> Generator[tuple[str, str], None, None]:
__A : List[str] = BeautifulSoup(requests.get(url + location ).content , 'html.parser' )
# Each 'organicJob' div block holds the details of one job posting
for job in soup.find_all('div' , attrs={'data-tn-component': 'organicJob'} ):
__A : List[str] = job.find('a' , attrs={'data-tn-element': 'jobTitle'} ).text.strip()
__A : List[Any] = job.find('span' , {'class': 'company'} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('''Bangalore'''), 1):
print(f"""Job {i:>2} is {job[0]} at {job[1]}""") | 8 |
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None):
'''simple docstring'''
__A : List[Any] = list(poly_a or [0])[:]
__A : Optional[int] = list(poly_b or [0])[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
__A : Union[str, Any] = len(self.polyA)
while self.polyB[-1] == 0:
self.polyB.pop()
__A : Optional[int] = len(self.polyB)
# Add 0 to make lengths equal a power of 2
__A : Optional[Any] = int(
2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))
while len(self.polyA) < self.c_max_length:
self.polyA.append(0)
while len(self.polyB) < self.c_max_length:
self.polyB.append(0)
# A complex root used for the fourier transform
__A : str = complex(mpmath.root(x=1 , n=self.c_max_length , k=1))
# The product
__A : Tuple = self.__multiply()
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A : Optional[int] = [[x] for x in self.polyA] if which == 'A' else [[x] for x in self.polyB]
# Corner case
if len(_UpperCAmelCase) <= 1:
return dft[0]
__A : Dict = self.c_max_length // 2
while next_ncol > 0:
__A : Optional[Any] = [[] for i in range(_UpperCAmelCase)]
__A : Tuple = self.root**next_ncol
# First half of next step
__A : Optional[Any] = 1
for j in range(self.c_max_length // (next_ncol * 2)):
for i in range(_UpperCAmelCase):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
current_root *= root
# Second half of next step
__A : List[str] = 1
for j in range(self.c_max_length // (next_ncol * 2)):
for i in range(_UpperCAmelCase):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
current_root *= root
# Update
__A : Optional[int] = new_dft
__A : Tuple = next_ncol // 2
return dft[0]
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = self.__dft('A')
__A : Optional[Any] = self.__dft('B')
__A : str = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0]) <= 1:
return inverce_c[0]
# Inverse DFT
__A : Dict = 2
while next_ncol <= self.c_max_length:
__A : Optional[int] = [[] for i in range(_UpperCAmelCase)]
__A : Any = self.root ** (next_ncol // 2)
__A : Tuple = 1
# First half of next step
for j in range(self.c_max_length // next_ncol):
for i in range(next_ncol // 2):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2)
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root))
current_root *= root
# Update
__A : int = new_inverse_c
next_ncol *= 2
# Unpack
__A : Optional[int] = [round(x[0].real , 8) + round(x[0].imag , 8) * 1j for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self):
'''simple docstring'''
__A : int = 'A = ' + ' + '.join(
F'{coef}*x^{i}' for coef, i in enumerate(self.polyA[: self.len_A]))
__A : Optional[Any] = 'B = ' + ' + '.join(
F'{coef}*x^{i}' for coef, i in enumerate(self.polyB[: self.len_B]))
__A : str = 'A*B = ' + ' + '.join(
F'{coef}*x^{i}' for coef, i in enumerate(self.product))
return F'{a}\n{b}\n{c}'
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 | 1 |
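# --- Editor's note: cross-check sketch using numpy's FFT, not the class above. ---
# Polynomial multiplication is convolution of coefficient lists; padding to a
# power of two and multiplying pointwise in the frequency domain reproduces the
# coefficients the hand-rolled radix-2 FFT above computes:
import numpy as np

poly_a = [1, 2, 3]  # 1 + 2x + 3x^2
poly_b = [4, 5]     # 4 + 5x
n = 8               # any power of two >= len(poly_a) + len(poly_b) - 1
product = np.rint(np.fft.ifft(np.fft.fft(poly_a, n) * np.fft.fft(poly_b, n)).real)
assert list(product[:4]) == [4, 13, 22, 15]  # matches np.convolve(poly_a, poly_b)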
'''simple docstring'''
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def _lowerCAmelCase ( __snake_case : Dict , __snake_case : Any , __snake_case : Tuple=0 ) -> Any:
# Format the message.
if name is None:
__A : Union[str, Any] = None
else:
__A : List[Any] = '.' * max(0 , spaces - 2 ) + '# {:' + str(50 - spaces ) + 's}'
__A : Dict = fmt.format(__snake_case )
# Print and recurse (if needed).
if isinstance(__snake_case , __snake_case ):
if msg is not None:
print(__snake_case )
for k in val.keys():
recursive_print(__snake_case , val[k] , spaces + 2 )
elif isinstance(__snake_case , torch.Tensor ):
print(__snake_case , ':' , val.size() )
else:
print(__snake_case , ':' , __snake_case )
def _lowerCAmelCase ( __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : Optional[Any] , __snake_case : List[str] , __snake_case : List[str] ) -> Union[str, Any]:
# Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
# for compatibility with later versions of NVIDIA Megatron-LM.
# The inverse operation is performed inside Megatron-LM to read checkpoints:
# https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
# If param is the weight tensor of the self-attention block, the returned tensor
# will have to be transposed one more time to be read by HuggingFace GPT2.
__A : Optional[Any] = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
__A : Optional[Any] = (num_heads, hidden_size, num_splits) + input_shape[1:]
__A : List[str] = param.view(*__snake_case )
__A : Tuple = param.transpose(0 , 2 )
__A : Dict = param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
__A : Dict = (num_heads, num_splits, hidden_size) + input_shape[1:]
__A : str = param.view(*__snake_case )
__A : Optional[Any] = param.transpose(0 , 1 ).contiguous()
__A : Dict = param.view(*__snake_case )
return param
def _lowerCAmelCase ( __snake_case : Optional[int] , __snake_case : str , __snake_case : Dict ) -> Union[str, Any]:
# The converted output model.
__A : str = {}
# old versions did not store training args
__A : List[str] = input_state_dict.get('args' , __snake_case )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
__A : Tuple = ds_args.padded_vocab_size
__A : str = ds_args.max_position_embeddings
__A : str = ds_args.hidden_size
__A : Any = ds_args.num_layers
__A : List[Any] = ds_args.num_attention_heads
__A : Union[str, Any] = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
__A : List[Any] = config.n_head
# The hidden_size per head.
__A : List[str] = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
__A : Optional[Any] = input_state_dict['checkpoint_version']
else:
__A : List[str] = 0.0
# The model.
__A : Any = input_state_dict['model']
# The language model.
__A : Tuple = model['language_model']
# The embeddings.
__A : Any = lm['embedding']
# The word embeddings.
__A : int = embeddings['word_embeddings']['weight']
# Truncate the embedding table to vocab_size rows.
__A : Union[str, Any] = word_embeddings[: config.vocab_size, :]
__A : Any = word_embeddings
# The position embeddings.
__A : List[Any] = embeddings['position_embeddings']['weight']
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
__A : Dict = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
f'pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match' )
# Store the position embeddings.
__A : Any = pos_embeddings
# The transformer.
__A : Any = lm['transformer'] if 'transformer' in lm.keys() else lm['encoder']
# The regex to extract layer names.
__A : Optional[Any] = re.compile(r'layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)' )
# The simple map of names for "automated" rules.
__A : int = {
'attention.dense': '.attn.c_proj.',
'self_attention.dense': '.attn.c_proj.',
'mlp.dense_h_to_4h': '.mlp.c_fc.',
'mlp.dense_4h_to_h': '.mlp.c_proj.',
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
__A : str = layer_re.match(__snake_case )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
__A : Any = int(m.group(1 ) )
# The name of the operation.
__A : List[Any] = m.group(2 )
# Is it a weight or a bias?
__A : str = m.group(3 )
# The name of the layer.
__A : Dict = f'transformer.h.{layer_idx}'
# For layernorm(s), simply store the layer norm.
if op_name.endswith('layernorm' ):
__A : str = 'ln_1' if op_name.startswith('input' ) else 'ln_2'
__A : Union[str, Any] = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
__A : Dict = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.float16 ) ).view(
1 , 1 , __snake_case , __snake_case )
__A : Union[str, Any] = causal_mask
# Insert a "dummy" tensor for masked_bias.
__A : Any = torch.tensor(-1e4 , dtype=torch.float16 )
__A : List[Any] = masked_bias
__A : List[Any] = fix_query_key_value_ordering(__snake_case , __snake_case , 3 , __snake_case , __snake_case )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
__A : List[Any] = out_val.transpose(0 , 1 ).contiguous()
# Store.
__A : List[Any] = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
__A : Any = fix_query_key_value_ordering(__snake_case , __snake_case , 3 , __snake_case , __snake_case )
# Store. No change of shape.
__A : Optional[Any] = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
__A : Union[str, Any] = megatron_to_transformers[op_name]
__A : Union[str, Any] = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
__A : Optional[Any] = megatron_to_transformers[op_name]
__A : Tuple = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
__A : List[Any] = transformer['final_layernorm.weight']
__A : int = transformer['final_layernorm.bias']
# For the LM head, transformers wants the matrix tied to the word embeddings.
__A : Union[str, Any] = word_embeddings
# It should be done!
return output_state_dict
def _lowerCAmelCase ( ) -> Optional[Any]:
# Create the argument parser.
__A : List[Any] = argparse.ArgumentParser()
parser.add_argument('--print-checkpoint-structure' , action='store_true' )
parser.add_argument(
'path_to_checkpoint' , type=__snake_case , help='Path to the checkpoint file (.zip archive or direct .pt file)' , )
parser.add_argument(
'--config_file' , default='' , type=__snake_case , help='An optional config json file describing the pre-trained model.' , )
__A : Optional[Any] = parser.parse_args()
# Extract the basename.
__A : List[Any] = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(f'Extracting PyTorch state dictionary from {args.path_to_checkpoint}' )
if args.path_to_checkpoint.endswith('.zip' ):
with zipfile.ZipFile(args.path_to_checkpoint , 'r' ) as checkpoint:
with checkpoint.open('release/mp_rank_00/model_optim_rng.pt' ) as pytorch_dict:
__A : Optional[Any] = torch.load(__snake_case , map_location='cpu' )
else:
__A : List[Any] = torch.load(args.path_to_checkpoint , map_location='cpu' )
__A : Dict = input_state_dict.get('args' , __snake_case )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
__A : int = 'gelu_fast'
elif ds_args.openai_gelu:
__A : Dict = 'gelu_new'
else:
__A : int = 'gelu'
else:
# in the very early days this used to be "gelu_new"
__A : Optional[Any] = 'gelu_new'
# Spell out all parameters in case the defaults change.
__A : str = GPT2Config(
vocab_size=5_02_57 , n_positions=10_24 , n_embd=10_24 , n_layer=24 , n_head=16 , n_inner=40_96 , activation_function=__snake_case , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type='cls_index' , summary_use_proj=__snake_case , summary_activation=__snake_case , summary_proj_to_labels=__snake_case , summary_first_dropout=0.1 , scale_attn_weights=__snake_case , use_cache=__snake_case , bos_token_id=5_02_56 , eos_token_id=5_02_56 , )
else:
__A : int = GPT2Config.from_json_file(args.config_file )
__A : Optional[Any] = ['GPT2LMHeadModel']
# Convert.
print('Converting' )
__A : str = convert_megatron_checkpoint(__snake_case , __snake_case , __snake_case )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(__snake_case , __snake_case )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
__A : List[Any] = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
__A : Tuple = 'gpt2'
elif tokenizer_type == "PretrainedFromHF":
__A : Optional[int] = ds_args.tokenizer_name_or_path
else:
raise ValueError(f'Unrecognized tokenizer_type {tokenizer_type}' )
else:
__A : int = 'gpt2'
__A : Optional[int] = AutoTokenizer.from_pretrained(__snake_case )
__A : List[str] = type(__snake_case ).__name__
__A : int = tokenizer_class
# Store the config to file.
print('Saving config' )
config.save_pretrained(__snake_case )
# Save tokenizer based on args
print(f'Adding {tokenizer_class} tokenizer files' )
tokenizer.save_pretrained(__snake_case )
# Store the state_dict to file.
__A : Dict = os.path.join(__snake_case , 'pytorch_model.bin' )
print(f'Saving checkpoint to "{output_checkpoint_file}"' )
torch.save(__snake_case , __snake_case )
####################################################################################################
if __name__ == "__main__":
main()
#################################################################################################### | 8 |
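# --- Editor's note: shape sketch for the QKV reordering above (torch assumed). ---
# For checkpoint_version >= 2.0 the fused QKV weight is stored as
# [num_heads * num_splits * head_dim, :] and must be permuted to
# [num_splits * num_heads * head_dim, :] so Q, K and V can be sliced apart:
import torch

num_heads, num_splits, head_dim, cols = 2, 3, 4, 5
param = torch.arange(num_heads * num_splits * head_dim * cols, dtype=torch.float32)
param = param.view(num_heads * num_splits * head_dim, cols)
out = (
    param.view(num_heads, num_splits, head_dim, cols)
    .transpose(0, 1)
    .contiguous()
    .view(num_splits * num_heads * head_dim, cols)
)
assert out.shape == param.shape  # same size; only the head/split axes are swapped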
'''simple docstring'''
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=[30, 30] , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=10 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=None , _UpperCAmelCase=8 , _UpperCAmelCase=10 , ):
'''simple docstring'''
__A : Union[str, Any] = parent
__A : Tuple = batch_size
__A : List[str] = image_size
__A : Dict = patch_size
__A : Optional[Any] = num_channels
__A : Tuple = is_training
__A : Dict = use_labels
__A : List[Any] = hidden_size
__A : Tuple = num_hidden_layers
__A : int = num_attention_heads
__A : Optional[int] = intermediate_size
__A : Tuple = hidden_act
__A : Any = hidden_dropout_prob
__A : Optional[Any] = attention_probs_dropout_prob
__A : List[Any] = type_sequence_label_size
__A : List[Any] = initializer_range
__A : Optional[int] = num_labels
__A : List[Any] = scope
__A : Any = n_targets
__A : Union[str, Any] = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
__A : List[str] = (image_size[1] // patch_size) * (image_size[0] // patch_size)
__A : int = num_patches + 1 + self.num_detection_tokens
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])
__A : Tuple = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
__A : List[Any] = []
for i in range(self.batch_size):
__A : Optional[int] = {}
__A : Union[str, Any] = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=_UpperCAmelCase)
__A : str = torch.rand(self.n_targets , 4 , device=_UpperCAmelCase)
labels.append(_UpperCAmelCase)
__A : Any = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Any = YolosModel(config=_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
__A : Dict = model(_UpperCAmelCase)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Any = YolosForObjectDetection(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
__A : str = model(pixel_values=_UpperCAmelCase)
__A : List[str] = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4))
__A : Union[str, Any] = model(pixel_values=_UpperCAmelCase , labels=_UpperCAmelCase)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = self.prepare_config_and_inputs()
__A ,__A ,__A : Tuple = config_and_inputs
__A : Tuple = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE (a__ , a__ , unittest.TestCase ):
lowerCAmelCase = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
lowerCAmelCase = (
{'''feature-extraction''': YolosModel, '''object-detection''': YolosForObjectDetection} if is_torch_available() else {}
)
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False):
'''simple docstring'''
__A : Optional[Any] = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase)
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
__A : Any = []
for i in range(self.model_tester.batch_size):
__A : Tuple = {}
__A : Tuple = torch.ones(
size=(self.model_tester.n_targets,) , device=_UpperCAmelCase , dtype=torch.long)
__A : Optional[Any] = torch.ones(
self.model_tester.n_targets , 4 , device=_UpperCAmelCase , dtype=torch.float)
labels.append(_UpperCAmelCase)
__A : str = labels
return inputs_dict
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = YolosModelTester(self)
__A : Dict = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : Tuple = model_class(_UpperCAmelCase)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
__A : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : List[Any] = model_class(_UpperCAmelCase)
__A : str = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A : int = [*signature.parameters.keys()]
__A : List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__A : Optional[int] = True
# in YOLOS, the seq_len is different
__A : Dict = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
__A : Dict = True
__A : Dict = False
__A : Union[str, Any] = True
__A : Tuple = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : Any = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Union[str, Any] = outputs.attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__A : List[Any] = True
__A : List[str] = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : List[Any] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Optional[Any] = outputs.attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
__A : str = len(_UpperCAmelCase)
# Check attention is always last and order is fine
__A : Dict = True
__A : Dict = True
__A : Dict = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : List[str] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Union[str, Any] = 1
self.assertEqual(out_len + added_hidden_states , len(_UpperCAmelCase))
__A : Optional[Any] = outputs.attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
__A : Tuple = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : List[str] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Optional[Any] = outputs.hidden_states
__A : List[str] = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase)
# YOLOS has a different seq_length
__A : Dict = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )
__A ,__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : List[str] = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__A : Optional[int] = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*_UpperCAmelCase)
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A : List[Any] = YolosModel.from_pretrained(_UpperCAmelCase)
self.assertIsNotNone(_UpperCAmelCase)
def _lowerCAmelCase ( ) -> int:
__A : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('hustvl/yolos-small') if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = YolosForObjectDetection.from_pretrained('hustvl/yolos-small').to(_UpperCAmelCase)
__A : Any = self.default_image_processor
__A : str = prepare_img()
__A : int = image_processor(images=_UpperCAmelCase , return_tensors='pt').to(_UpperCAmelCase)
# forward pass
with torch.no_grad():
__A : str = model(inputs.pixel_values)
# verify outputs
__A : Tuple = torch.Size((1, 100, 92))
self.assertEqual(outputs.logits.shape , _UpperCAmelCase)
__A : Dict = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] , device=_UpperCAmelCase , )
__A : int = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] , device=_UpperCAmelCase)
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , _UpperCAmelCase , atol=1e-4))
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , _UpperCAmelCase , atol=1e-4))
# verify postprocessing
__A : List[str] = image_processor.post_process_object_detection(
_UpperCAmelCase , threshold=0.3 , target_sizes=[image.size[::-1]])[0]
__A : Optional[int] = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(_UpperCAmelCase)
__A : Union[str, Any] = [75, 75, 17, 63, 17]
__A : Any = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(_UpperCAmelCase)
self.assertEqual(len(results['scores']) , 5)
self.assertTrue(torch.allclose(results['scores'] , _UpperCAmelCase , atol=1e-4))
self.assertSequenceEqual(results['labels'].tolist() , _UpperCAmelCase)
self.assertTrue(torch.allclose(results['boxes'][0, :] , _UpperCAmelCase)) | 8 | 1 |
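# --- Editor's note: the arithmetic behind expected_seq_len in the tester above. ---
# YOLOS prepends a [CLS] token and appends learnable detection tokens, so the
# sequence length is num_patches + 1 + num_detection_tokens. With the tester's
# defaults (30x30 images, patch size 2, 10 detection tokens):
image_size, patch_size, num_detection_tokens = (30, 30), 2, 10
num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
assert num_patches == 225
assert num_patches + 1 + num_detection_tokens == 236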
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowercase__ : List[str] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = ['''pixel_values''']
def __init__( self , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = PILImageResampling.BICUBIC , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = True , _UpperCAmelCase = 1 / 255 , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = True , **_UpperCAmelCase , ):
'''simple docstring'''
super().__init__(**_UpperCAmelCase)
__A : int = size if size is not None else {'shortest_edge': 224}
__A : Any = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase)
__A : Union[str, Any] = crop_size if crop_size is not None else {'height': 224, 'width': 224}
__A : List[str] = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase , param_name='crop_size')
__A : Optional[int] = do_resize
__A : str = size
__A : List[Any] = resample
__A : Dict = do_center_crop
__A : Any = crop_size
__A : Tuple = do_rescale
__A : List[str] = rescale_factor
__A : List[Any] = do_normalize
__A : Optional[Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__A : Optional[int] = image_std if image_std is not None else OPENAI_CLIP_STD
__A : Optional[Any] = do_convert_rgb
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = PILImageResampling.BICUBIC , _UpperCAmelCase = None , **_UpperCAmelCase , ):
'''simple docstring'''
__A : List[str] = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase)
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}')
__A : List[str] = get_resize_output_image_size(_UpperCAmelCase , size=size['shortest_edge'] , default_to_square=_UpperCAmelCase)
return resize(_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ):
'''simple docstring'''
__A : List[str] = get_size_dict(_UpperCAmelCase)
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}')
return center_crop(_UpperCAmelCase , size=(size['height'], size['width']) , data_format=_UpperCAmelCase , **_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ):
'''simple docstring'''
return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ):
'''simple docstring'''
return normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = ChannelDimension.FIRST , **_UpperCAmelCase , ):
'''simple docstring'''
__A : Tuple = do_resize if do_resize is not None else self.do_resize
__A : int = size if size is not None else self.size
__A : Any = get_size_dict(_UpperCAmelCase , param_name='size' , default_to_square=_UpperCAmelCase)
__A : Dict = resample if resample is not None else self.resample
__A : str = do_center_crop if do_center_crop is not None else self.do_center_crop
__A : str = crop_size if crop_size is not None else self.crop_size
__A : str = get_size_dict(_UpperCAmelCase , param_name='crop_size' , default_to_square=_UpperCAmelCase)
__A : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
__A : int = rescale_factor if rescale_factor is not None else self.rescale_factor
__A : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
__A : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
__A : Any = image_std if image_std is not None else self.image_std
__A : Any = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__A : int = make_list_of_images(_UpperCAmelCase)
if not valid_images(_UpperCAmelCase):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__A : Any = [convert_to_rgb(_UpperCAmelCase) for image in images]
# All transformations expect numpy arrays.
__A : Union[str, Any] = [to_numpy_array(_UpperCAmelCase) for image in images]
if do_resize:
__A : Any = [self.resize(image=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase) for image in images]
if do_center_crop:
__A : Tuple = [self.center_crop(image=_UpperCAmelCase , size=_UpperCAmelCase) for image in images]
if do_rescale:
__A : List[Any] = [self.rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase) for image in images]
if do_normalize:
__A : Any = [self.normalize(image=_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase) for image in images]
__A : Optional[Any] = [to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase) for image in images]
__A : int = {'pixel_values': images}
return BatchFeature(data=_UpperCAmelCase , tensor_type=_UpperCAmelCase) | 8 |
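# Minimal usage sketch for the image processor above (instantiation assumed; defaults
# mirror the __init__ signature): resize -> center-crop -> rescale -> normalize.
#   processor = SCREAMING_SNAKE_CASE()                       # shortest_edge 224, crop 224x224
#   batch = processor(images=pil_image, return_tensors='pt')
#   batch['pixel_values'].shape                              # -> (1, 3, 224, 224)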
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
lowercase__ : Optional[int] = None
lowercase__ : List[str] = logging.get_logger(__name__)
lowercase__ : Optional[Any] = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowercase__ : List[str] = {
'''vocab_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/tokenizer.json''',
},
}
lowercase__ : Dict = {
'''camembert-base''': 5_12,
}
lowercase__ : str = '''▁'''
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = ['''input_ids''', '''attention_mask''']
lowerCAmelCase = CamembertTokenizer
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<s>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="<mask>" , _UpperCAmelCase=["<s>NOTUSED", "</s>NOTUSED"] , **_UpperCAmelCase , ):
'''simple docstring'''
__A : int = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase) if isinstance(_UpperCAmelCase , _UpperCAmelCase) else mask_token
super().__init__(
_UpperCAmelCase , tokenizer_file=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , **_UpperCAmelCase , )
__A : List[str] = vocab_file
__A : Optional[int] = False if not self.vocab_file else True
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__A : Optional[Any] = [self.cls_token_id]
__A : Optional[int] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None):
'''simple docstring'''
__A : Optional[int] = [self.sep_token_id]
__A : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
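    # Worked example (illustrative) of the special-token layout the two methods above
    # produce for a sequence pair, RoBERTa/CamemBERT style:
    #   tokens:         <s> A </s> </s> B </s>
    #   token_type_ids:  0  0   0    0  0   0   (CamemBERT does not use segment ids)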
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.')
if not os.path.isdir(_UpperCAmelCase):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
__A : List[Any] = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(_UpperCAmelCase):
copyfile(self.vocab_file , _UpperCAmelCase)
return (out_vocab_file,) | 8 | 1 |
'''simple docstring'''
from math import sqrt
def _lowerCAmelCase ( __snake_case : int ) -> bool:
assert isinstance(__snake_case , __snake_case ) and (
number >= 0
), "'number' must been an int and positive"
__A : Optional[int] = True
# 0 and 1 are none primes.
if number <= 1:
__A : Tuple = False
for divisor in range(2 , int(round(sqrt(__snake_case ) ) ) + 1 ):
        # if 'number' is divisible by 'divisor', set 'status'
        # to False and break out of the loop.
if number % divisor == 0:
__A : Union[str, Any] = False
break
# precondition
assert isinstance(__snake_case , __snake_case ), "'status' must been from type bool"
return status
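# Self-contained restatement of the trial-division idea above (hypothetical helper,
# for illustration only): a number > 1 is prime iff it has no divisor in [2, sqrt(n)].
def _is_prime_demo(number: int) -> bool:
    if number <= 1:
        return False
    return all(number % d for d in range(2, int(number**0.5) + 1))
# _is_prime_demo(7) -> True, _is_prime_demo(9) -> False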
def _lowerCAmelCase ( __snake_case : Tuple ) -> Optional[int]:
assert isinstance(__snake_case , __snake_case ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
__A : List[str] = list(range(2 , n + 1 ) )
    __A : Any = [] # this list will be returned.
    # actual sieve of Eratosthenes
for i in range(len(__snake_case ) ):
for j in range(i + 1 , len(__snake_case ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
__A : Any = 0
# filters actual prime numbers.
__A : Dict = [x for x in begin_list if x != 0]
# precondition
assert isinstance(__snake_case , __snake_case ), "'ans' must been from type list"
return ans
def _lowerCAmelCase ( __snake_case : str ) -> Union[str, Any]:
assert isinstance(__snake_case , __snake_case ) and (n > 2), "'N' must been an int and > 2"
__A : List[Any] = []
    # iterate over all numbers from 2 up to N (inclusive)
    # if a number is prime, append it to the list 'ans'
for number in range(2 , n + 1 ):
if is_prime(__snake_case ):
ans.append(__snake_case )
# precondition
assert isinstance(__snake_case , __snake_case ), "'ans' must been from type list"
return ans
def _lowerCAmelCase ( __snake_case : List[Any] ) -> List[str]:
    assert isinstance(__snake_case , __snake_case ) and number >= 0, "'number' must be an int and >= 0"
    __A : Optional[int] = [] # this list will be returned by the function.
# potential prime number factors.
__A : int = 2
__A : Tuple = number
if number == 0 or number == 1:
ans.append(__snake_case )
    # if 'number' is not prime, build the prime factorization of 'number'
elif not is_prime(__snake_case ):
while quotient != 1:
if is_prime(__snake_case ) and (quotient % factor == 0):
ans.append(__snake_case )
quotient /= factor
else:
factor += 1
else:
ans.append(__snake_case )
# precondition
assert isinstance(__snake_case , __snake_case ), "'ans' must been from type list"
return ans
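# Worked example for the loop above: 60 -> [2, 2, 3, 5]. Note that `quotient /= factor`
# uses true division, so the quotient becomes a float. A self-contained integer variant
# (hypothetical helper, illustration only):
def _prime_factorization_demo(n: int) -> list:
    factors, d = [], 2
    while d * d <= n:
        while n % d == 0:
            factors.append(d)
            n //= d
        d += 1
    if n > 1:
        factors.append(n)  # whatever remains is itself prime
    return factors
# _prime_factorization_demo(60) -> [2, 2, 3, 5]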
def _lowerCAmelCase ( __snake_case : str ) -> Optional[int]:
assert isinstance(__snake_case , __snake_case ) and (
number >= 0
), "'number' bust been an int and >= 0"
__A : Any = 0
# prime factorization of 'number'
__A : Dict = prime_factorization(__snake_case )
__A : Union[str, Any] = max(__snake_case )
# precondition
assert isinstance(__snake_case , __snake_case ), "'ans' must been from type int"
return ans
def _lowerCAmelCase ( __snake_case : Optional[Any] ) -> str:
assert isinstance(__snake_case , __snake_case ) and (
number >= 0
), "'number' bust been an int and >= 0"
__A : Tuple = 0
# prime factorization of 'number'
__A : Any = prime_factorization(__snake_case )
__A : Any = min(__snake_case )
# precondition
assert isinstance(__snake_case , __snake_case ), "'ans' must been from type int"
return ans
def _lowerCAmelCase ( __snake_case : Union[str, Any] ) -> Optional[int]:
assert isinstance(__snake_case , __snake_case ), "'number' must been an int"
assert isinstance(number % 2 == 0 , __snake_case ), "compare bust been from type bool"
return number % 2 == 0
def _lowerCAmelCase ( __snake_case : List[str] ) -> str:
assert isinstance(__snake_case , __snake_case ), "'number' must been an int"
assert isinstance(number % 2 != 0 , __snake_case ), "compare bust been from type bool"
return number % 2 != 0
def _lowerCAmelCase ( __snake_case : Union[str, Any] ) -> Optional[int]:
assert (
isinstance(__snake_case , __snake_case ) and (number > 2) and is_even(__snake_case )
), "'number' must been an int, even and > 2"
__A : List[Any] = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
__A : List[str] = get_prime_numbers(__snake_case )
__A : Tuple = len(__snake_case )
# run variable for while-loops.
__A : Optional[int] = 0
__A : Union[str, Any] = None
# exit variable. for break up the loops
__A : str = True
while i < len_pn and loop:
__A : int = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
__A : Any = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(__snake_case , __snake_case )
and (len(__snake_case ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
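# Worked example for the Goldbach search above: for number = 28 the first prime pair
# found in ascending order is 5 + 23, so the function returns [5, 23].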
def _lowerCAmelCase ( __snake_case : Union[str, Any] , __snake_case : int ) -> Any:
assert (
isinstance(__snake_case , __snake_case )
and isinstance(__snake_case , __snake_case )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
__A : List[str] = 0
while numbera != 0:
__A : Optional[int] = numbera % numbera
__A : Dict = numbera
__A : List[Any] = rest
# precondition
assert isinstance(__snake_case , __snake_case ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
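# The loop above is Euclid's algorithm: repeatedly replace (a, b) with (b, a mod b)
# until the remainder is 0. A self-contained reference version (hypothetical helper):
def _gcd_demo(a: int, b: int) -> int:
    while b:
        a, b = b, a % b
    return a
# _gcd_demo(24, 36) -> 12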
def _lowerCAmelCase ( __snake_case : Optional[int] , __snake_case : List[Any] ) -> List[Any]:
assert (
isinstance(__snake_case , __snake_case )
and isinstance(__snake_case , __snake_case )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
__A : Union[str, Any] = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
__A : Optional[Any] = prime_factorization(__snake_case )
__A : Union[str, Any] = prime_factorization(__snake_case )
elif numbera == 1 or numbera == 1:
__A : str = []
__A : Union[str, Any] = []
__A : Union[str, Any] = max(__snake_case , __snake_case )
__A : Union[str, Any] = 0
__A : Tuple = 0
    __A : List[Any] = [] # numbers captured in both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
__A : Dict = prime_fac_a.count(__snake_case )
__A : List[str] = prime_fac_a.count(__snake_case )
for _ in range(max(__snake_case , __snake_case ) ):
ans *= n
else:
__A : int = prime_fac_a.count(__snake_case )
for _ in range(__snake_case ):
ans *= n
done.append(__snake_case )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
__A : str = prime_fac_a.count(__snake_case )
for _ in range(__snake_case ):
ans *= n
done.append(__snake_case )
# precondition
assert isinstance(__snake_case , __snake_case ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
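# The function above assembles lcm(number1, number2) from merged prime factorizations.
# An equivalent shortcut uses the identity lcm(a, b) = a * b // gcd(a, b)
# (hypothetical helper, illustration only):
def _lcm_demo(a: int, b: int) -> int:
    from math import gcd
    return a * b // gcd(a, b)
# _lcm_demo(4, 6) -> 12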
def _lowerCAmelCase ( __snake_case : Optional[Any] ) -> List[Any]:
assert isinstance(__snake_case , __snake_case ) and (n >= 0), "'number' must been a positive int"
__A : List[Any] = 0
__A : str = 2 # this variable holds the answer
while index < n:
index += 1
        ans += 1 # count up to the next number
        # if ans is not prime,
        # advance to the next prime number.
while not is_prime(__snake_case ):
ans += 1
# precondition
assert isinstance(__snake_case , __snake_case ) and is_prime(
__snake_case ), "'ans' must been a prime number and from type int"
return ans
def _lowerCAmelCase ( __snake_case : Optional[int] , __snake_case : Optional[int] ) -> Dict:
assert (
is_prime(__snake_case ) and is_prime(__snake_case ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
__A : Any = p_number_a + 1 # jump to the next number
    __A : List[Any] = [] # this list will be returned.
# if number is not prime then
# fetch the next prime number.
while not is_prime(__snake_case ):
number += 1
while number < p_number_a:
ans.append(__snake_case )
number += 1
# fetch the next prime number.
while not is_prime(__snake_case ):
number += 1
# precondition
assert (
isinstance(__snake_case , __snake_case )
and ans[0] != p_number_a
and ans[len(__snake_case ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def _lowerCAmelCase ( __snake_case : Dict ) -> str:
assert isinstance(__snake_case , __snake_case ) and (n >= 1), "'n' must been int and >= 1"
__A : List[str] = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(__snake_case )
# precondition
    assert ans[0] == 1 and ans[len(__snake_case ) - 1] == n, "Error in function getDivisors(...)"
return ans
def _lowerCAmelCase ( __snake_case : Union[str, Any] ) -> Dict:
assert isinstance(__snake_case , __snake_case ) and (
number > 1
), "'number' must been an int and >= 1"
__A : Optional[Any] = get_divisors(__snake_case )
# precondition
assert (
isinstance(__snake_case , __snake_case )
and (divisors[0] == 1)
and (divisors[len(__snake_case ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def _lowerCAmelCase ( __snake_case : Any , __snake_case : int ) -> List[Any]:
assert (
isinstance(__snake_case , __snake_case )
and isinstance(__snake_case , __snake_case )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
__A : Optional[int] = gcd(abs(__snake_case ) , abs(__snake_case ) )
# precondition
assert (
isinstance(__snake_case , __snake_case )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def _lowerCAmelCase ( __snake_case : Tuple ) -> int:
assert isinstance(__snake_case , __snake_case ) and (n >= 0), "'n' must been a int and >= 0"
__A : Optional[int] = 1 # this will be return.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def _lowerCAmelCase ( __snake_case : str ) -> Dict:
assert isinstance(__snake_case , __snake_case ) and (n >= 0), "'n' must been an int and >= 0"
__A : Union[str, Any] = 0
__A : List[str] = 1
__A : Dict = 1 # this will be return
for _ in range(n - 1 ):
__A : str = ans
ans += fiba
__A : List[Any] = tmp
return ans | 8 |
'''simple docstring'''
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
lowercase__ : Any = '''hf-internal-testing/tiny-random-bert'''
lowercase__ : Optional[Any] = os.path.join(TRANSFORMERS_CACHE, '''models--hf-internal-testing--tiny-random-bert''')
lowercase__ : List[Any] = '''9b8c223d42b2188cb49d29af482996f9d0f3e5a6'''
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase)
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(_UpperCAmelCase))
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(_UpperCAmelCase , _UpperCAmelCase)))
with open(os.path.join(_UpperCAmelCase , 'refs' , 'main')) as f:
__A : Any = f.read()
self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , 'snapshots' , _UpperCAmelCase , _UpperCAmelCase))
self.assertTrue(os.path.isfile(_UpperCAmelCase))
# File is cached at the same place the second time.
__A : Tuple = cached_file(_UpperCAmelCase , _UpperCAmelCase)
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase)
# Using a specific revision to test the full commit hash.
__A : List[Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='9b8c223')
self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , 'snapshots' , _UpperCAmelCase , _UpperCAmelCase))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid model identifier'):
__A : Dict = cached_file('tiny-random-bert' , _UpperCAmelCase)
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid git identifier'):
__A : Optional[int] = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='aaaa')
with self.assertRaisesRegex(_UpperCAmelCase , 'does not appear to have a file named'):
__A : int = cached_file(_UpperCAmelCase , 'conf')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with self.assertRaisesRegex(_UpperCAmelCase , 'does not appear to have a file named'):
__A : Any = cached_file(_UpperCAmelCase , 'conf')
with open(os.path.join(_UpperCAmelCase , 'refs' , 'main')) as f:
__A : Dict = f.read()
self.assertTrue(os.path.isfile(os.path.join(_UpperCAmelCase , '.no_exist' , _UpperCAmelCase , 'conf')))
__A : List[Any] = cached_file(_UpperCAmelCase , 'conf' , _raise_exceptions_for_missing_entries=_UpperCAmelCase)
self.assertIsNone(_UpperCAmelCase)
__A : str = cached_file(_UpperCAmelCase , 'conf' , local_files_only=_UpperCAmelCase , _raise_exceptions_for_missing_entries=_UpperCAmelCase)
self.assertIsNone(_UpperCAmelCase)
__A : List[str] = mock.Mock()
__A : Dict = 500
__A : List[str] = {}
__A : List[Any] = HTTPError
__A : Optional[Any] = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=_UpperCAmelCase) as mock_head:
__A : Dict = cached_file(_UpperCAmelCase , 'conf' , _raise_exceptions_for_connection_errors=_UpperCAmelCase)
self.assertIsNone(_UpperCAmelCase)
            # This checks that we did call the fake head request
mock_head.assert_called()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.assertTrue(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase))
self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase))
self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.assertIsNone(get_file_from_repo('bert-base-cased' , 'ahah.txt'))
# The function raises if the repository does not exist.
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid model identifier'):
get_file_from_repo('bert-base-case' , _UpperCAmelCase)
# The function raises if the revision does not exist.
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid git identifier'):
get_file_from_repo('bert-base-cased' , _UpperCAmelCase , revision='ahaha')
__A : List[str] = get_file_from_repo('bert-base-cased' , _UpperCAmelCase)
# The name is the cached name which is not very easy to test, so instead we load the content.
__A : List[str] = json.loads(open(_UpperCAmelCase , 'r').read())
self.assertEqual(config['hidden_size'] , 768)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
__A : Tuple = Path(_UpperCAmelCase) / 'a.txt'
filename.touch()
self.assertEqual(get_file_from_repo(_UpperCAmelCase , 'a.txt') , str(_UpperCAmelCase))
self.assertIsNone(get_file_from_repo(_UpperCAmelCase , 'b.txt')) | 8 | 1 |
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss']):
__A : List[str] = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = 'sshleifer/tiny-gpt2'
__A : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=_UpperCAmelCase , multi_process=_UpperCAmelCase , )
__A : Optional[int] = TensorFlowBenchmark(_UpperCAmelCase)
__A : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = 'sgugger/tiny-distilbert-classification'
__A : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , only_pretrain_model=_UpperCAmelCase , )
__A : List[str] = TensorFlowBenchmark(_UpperCAmelCase)
__A : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = 'sshleifer/tiny-gpt2'
__A : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , )
__A : int = TensorFlowBenchmark(_UpperCAmelCase)
__A : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[Any] = 'sshleifer/tiny-gpt2'
__A : Optional[int] = AutoConfig.from_pretrained(_UpperCAmelCase)
__A : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=_UpperCAmelCase , multi_process=_UpperCAmelCase , )
__A : Dict = TensorFlowBenchmark(_UpperCAmelCase , [config])
__A : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = 'sshleifer/tiny-gpt2'
__A : Tuple = AutoConfig.from_pretrained(_UpperCAmelCase)
__A : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , )
__A : Union[str, Any] = TensorFlowBenchmark(_UpperCAmelCase , [config])
__A : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = 'sshleifer/tiny-gpt2'
__A : Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , )
__A : Any = TensorFlowBenchmark(_UpperCAmelCase)
__A : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = 'sshleifer/tiny-gpt2'
__A : Optional[Any] = AutoConfig.from_pretrained(_UpperCAmelCase)
__A : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , )
__A : Tuple = TensorFlowBenchmark(_UpperCAmelCase , [config])
__A : str = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = 'patrickvonplaten/t5-tiny-random'
__A : Optional[Any] = AutoConfig.from_pretrained(_UpperCAmelCase)
__A : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , )
__A : Union[str, Any] = TensorFlowBenchmark(_UpperCAmelCase , configs=[config])
__A : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU')) == 0 , 'Cannot do xla on CPU.')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = 'sshleifer/tiny-gpt2'
__A : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , use_xla=_UpperCAmelCase , multi_process=_UpperCAmelCase , )
__A : Tuple = TensorFlowBenchmark(_UpperCAmelCase)
__A : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
__A : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=_UpperCAmelCase , save_to_csv=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_UpperCAmelCase , 'inf_time.csv') , inference_memory_csv_file=os.path.join(_UpperCAmelCase , 'inf_mem.csv') , env_info_csv_file=os.path.join(_UpperCAmelCase , 'env.csv') , multi_process=_UpperCAmelCase , )
__A : str = TensorFlowBenchmark(_UpperCAmelCase)
benchmark.run()
self.assertTrue(Path(os.path.join(_UpperCAmelCase , 'inf_time.csv')).exists())
self.assertTrue(Path(os.path.join(_UpperCAmelCase , 'inf_mem.csv')).exists())
self.assertTrue(Path(os.path.join(_UpperCAmelCase , 'env.csv')).exists())
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(_UpperCAmelCase):
self.assertTrue(hasattr(_UpperCAmelCase , 'sequential'))
self.assertTrue(hasattr(_UpperCAmelCase , 'cumulative'))
self.assertTrue(hasattr(_UpperCAmelCase , 'current'))
self.assertTrue(hasattr(_UpperCAmelCase , 'total'))
with tempfile.TemporaryDirectory() as tmp_dir:
__A : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_UpperCAmelCase , 'log.txt') , log_print=_UpperCAmelCase , trace_memory_line_by_line=_UpperCAmelCase , eager_mode=_UpperCAmelCase , multi_process=_UpperCAmelCase , )
__A : int = TensorFlowBenchmark(_UpperCAmelCase)
__A : Union[str, Any] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary)
self.assertTrue(Path(os.path.join(_UpperCAmelCase , 'log.txt')).exists()) | 8 |
'''simple docstring'''
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def _lowerCAmelCase ( __snake_case : str , __snake_case : str , **__snake_case : List[Any] ) -> Any:
__A : Optional[Any] = AutoConfig.from_pretrained(__snake_case , **__snake_case )
__A : int = AutoModelForSeqaSeqLM.from_config(__snake_case )
model.save_pretrained(__snake_case )
AutoTokenizer.from_pretrained(__snake_case ).save_pretrained(__snake_case )
return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version) | 8 | 1 |
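# Usage sketch (file and flag names hypothetical): python-fire exposes the function's
# parameters as command-line flags, so this script can be invoked as e.g.
#   python save_randomly_initialized.py --config_name t5-small --save_dir ./tiny-t5
# with any extra flags forwarded to AutoConfig.from_pretrained as config kwargs.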
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
lowercase__ : Optional[int] = 3
def _lowerCAmelCase ( __snake_case : int ) -> int:
print('Generating primitive root of p' )
while True:
__A : Optional[int] = random.randrange(3 , __snake_case )
if pow(__snake_case , 2 , __snake_case ) == 1:
continue
if pow(__snake_case , __snake_case , __snake_case ) == 1:
continue
return g
def _lowerCAmelCase ( __snake_case : int ) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
print('Generating prime p...' )
__A : Tuple = rabin_miller.generate_large_prime(__snake_case ) # select large prime number.
__A : List[Any] = primitive_root(__snake_case ) # one primitive root on modulo p.
    __A : Optional[Any] = random.randrange(3 , __snake_case ) # private key -> has to be greater than 2 for safety.
__A : Any = cryptomath.find_mod_inverse(pow(__snake_case , __snake_case , __snake_case ) , __snake_case )
__A : List[str] = (key_size, e_a, e_a, p)
__A : Dict = (key_size, d)
return public_key, private_key
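# Textbook ElGamal sketch (illustration with hypothetical names; real keys use large
# primes): with prime p, generator g, private key d and public value y = pow(g, d, p),
# a message m is encrypted with an ephemeral secret k as
#   c1 = pow(g, k, p); c2 = (m * pow(y, k, p)) % p
# and decrypted as m = (c2 * pow(c1, p - 1 - d, p)) % p, by Fermat's little theorem.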
def _lowerCAmelCase ( __snake_case : str , __snake_case : int ) -> None:
if os.path.exists(f'{name}_pubkey.txt' ) or os.path.exists(f'{name}_privkey.txt' ):
print('\nWARNING:' )
print(
f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
'Use a different name or delete these files and re-run this program.' )
sys.exit()
__A ,__A : str = generate_key(__snake_case )
print(f'\nWriting public key to file {name}_pubkey.txt...' )
with open(f'{name}_pubkey.txt' , 'w' ) as fo:
fo.write(f'{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}' )
print(f'Writing private key to file {name}_privkey.txt...' )
with open(f'{name}_privkey.txt' , 'w' ) as fo:
fo.write(f'{private_key[0]},{private_key[1]}' )
def _lowerCAmelCase ( ) -> None:
print('Making key files...' )
make_key_files('elgamal' , 20_48 )
print('Key files generation successful' )
if __name__ == "__main__":
main() | 8 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
lowercase__ : Any = {
'''google/tapas-base-finetuned-sqa''': (
'''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wtq''': (
'''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wikisql-supervised''': (
'''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-tabfact''': (
'''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = '''tapas'''
def __init__( self , _UpperCAmelCase=3_0522 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=1024 , _UpperCAmelCase=[3, 256, 256, 2, 256, 256, 10] , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=0 , _UpperCAmelCase=10.0 , _UpperCAmelCase=0 , _UpperCAmelCase=1.0 , _UpperCAmelCase=None , _UpperCAmelCase=1.0 , _UpperCAmelCase=False , _UpperCAmelCase=None , _UpperCAmelCase=1.0 , _UpperCAmelCase=1.0 , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase="ratio" , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=64 , _UpperCAmelCase=32 , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase , ):
'''simple docstring'''
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase)
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
__A : Dict = vocab_size
__A : Tuple = hidden_size
__A : Any = num_hidden_layers
__A : int = num_attention_heads
__A : Tuple = hidden_act
__A : Tuple = intermediate_size
__A : List[Any] = hidden_dropout_prob
__A : int = attention_probs_dropout_prob
__A : List[str] = max_position_embeddings
__A : Optional[int] = type_vocab_sizes
__A : str = initializer_range
__A : List[str] = layer_norm_eps
# Fine-tuning task hyperparameters
__A : List[str] = positive_label_weight
__A : List[Any] = num_aggregation_labels
__A : Optional[Any] = aggregation_loss_weight
__A : Tuple = use_answer_as_supervision
__A : List[str] = answer_loss_importance
__A : Any = use_normalized_answer_loss
__A : Any = huber_loss_delta
__A : Union[str, Any] = temperature
__A : Tuple = aggregation_temperature
__A : Optional[Any] = use_gumbel_for_cells
__A : List[str] = use_gumbel_for_aggregation
__A : Tuple = average_approximation_function
__A : List[str] = cell_selection_preference
__A : Dict = answer_loss_cutoff
__A : Union[str, Any] = max_num_rows
__A : Optional[Any] = max_num_columns
__A : int = average_logits_per_cell
__A : Optional[Any] = select_one_column
__A : int = allow_empty_column_selection
__A : List[Any] = init_cell_selection_weights_to_zero
__A : int = reset_position_index_per_cell
__A : Union[str, Any] = disable_per_token_loss
# Aggregation hyperparameters
__A : Optional[Any] = aggregation_labels
__A : List[str] = no_aggregation_label_index
if isinstance(self.aggregation_labels , _UpperCAmelCase):
__A : Optional[Any] = {int(_UpperCAmelCase): v for k, v in aggregation_labels.items()} | 8 | 1 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
lowercase__ : int = '''\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
'''
lowercase__ : Union[str, Any] = '''
This metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
'''
lowercase__ : Optional[int] = '''
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the SQuAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly match the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]
>>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]
>>> squad_metric = datasets.load_metric("squad")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE (datasets.Metric ):
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': {'id': datasets.Value('string'), 'prediction_text': datasets.Value('string')},
'references': {
'id': datasets.Value('string'),
'answers': datasets.features.Sequence(
{
'text': datasets.Value('string'),
'answer_start': datasets.Value('int32'),
}),
},
}) , codebase_urls=['https://rajpurkar.github.io/SQuAD-explorer/'] , reference_urls=['https://rajpurkar.github.io/SQuAD-explorer/'] , )
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Optional[Any] = {prediction['id']: prediction['prediction_text'] for prediction in predictions}
__A : List[Any] = [
{
'paragraphs': [
{
'qas': [
{
'answers': [{'text': answer_text} for answer_text in ref['answers']['text']],
'id': ref['id'],
}
for ref in references
]
}
]
}
]
__A : Optional[int] = evaluate(dataset=_UpperCAmelCase , predictions=_UpperCAmelCase)
return score | 8 |
'''simple docstring'''
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=sys.maxsize):
'''simple docstring'''
__A : Union[str, Any] = 'bilinear'
__A : int = max_size
__A : Optional[Any] = short_edge_length
def __call__( self , _UpperCAmelCase):
'''simple docstring'''
__A : int = []
for img in imgs:
__A ,__A : Dict = img.shape[:2]
# later: provide list and randomly choose index for resize
__A : List[Any] = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1)
if size == 0:
return img
__A : Tuple = size * 1.0 / min(_UpperCAmelCase , _UpperCAmelCase)
if h < w:
__A ,__A : Optional[Any] = size, scale * w
else:
__A ,__A : Optional[Any] = scale * h, size
if max(_UpperCAmelCase , _UpperCAmelCase) > self.max_size:
__A : Tuple = self.max_size * 1.0 / max(_UpperCAmelCase , _UpperCAmelCase)
__A : Tuple = newh * scale
__A : Dict = neww * scale
__A : Dict = int(neww + 0.5)
__A : Optional[int] = int(newh + 0.5)
if img.dtype == np.uinta:
__A : int = Image.fromarray(_UpperCAmelCase)
__A : Optional[int] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR)
__A : Dict = np.asarray(_UpperCAmelCase)
else:
                __A : Optional[Any] = img.permute(2 , 0 , 1).unsqueeze(0) # hwc -> nchw
__A : Dict = nn.functional.interpolate(
_UpperCAmelCase , (newh, neww) , mode=self.interp_method , align_corners=_UpperCAmelCase).squeeze(0)
img_augs.append(_UpperCAmelCase)
return img_augs
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase):
'''simple docstring'''
__A : List[Any] = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST)
__A : List[Any] = cfg.INPUT.FORMAT
__A : Dict = cfg.SIZE_DIVISIBILITY
__A : str = cfg.PAD_VALUE
__A : Union[str, Any] = cfg.INPUT.MAX_SIZE_TEST
__A : int = cfg.MODEL.DEVICE
__A : Tuple = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1)
__A : Union[str, Any] = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1)
__A : int = lambda _UpperCAmelCase: (x - self.pixel_mean) / self.pixel_std
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A : List[Any] = tuple(max(_UpperCAmelCase) for s in zip(*[img.shape for img in images]))
__A : Dict = [im.shape[-2:] for im in images]
__A : Optional[int] = [
nn.functional.pad(
_UpperCAmelCase , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(_UpperCAmelCase , _UpperCAmelCase)
]
return torch.stack(_UpperCAmelCase), torch.tensor(_UpperCAmelCase)
def __call__( self , _UpperCAmelCase , _UpperCAmelCase=False):
'''simple docstring'''
with torch.no_grad():
if not isinstance(_UpperCAmelCase , _UpperCAmelCase):
__A : int = [images]
if single_image:
assert len(_UpperCAmelCase) == 1
for i in range(len(_UpperCAmelCase)):
if isinstance(images[i] , torch.Tensor):
images.insert(_UpperCAmelCase , images.pop(_UpperCAmelCase).to(self.device).float())
elif not isinstance(images[i] , torch.Tensor):
images.insert(
_UpperCAmelCase , torch.as_tensor(img_tensorize(images.pop(_UpperCAmelCase) , input_format=self.input_format))
.to(self.device)
.float() , )
# resize smallest edge
__A : str = torch.tensor([im.shape[:2] for im in images])
__A : List[str] = self.aug(_UpperCAmelCase)
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
__A : Any = [self.normalizer(_UpperCAmelCase) for x in images]
# now pad them to do the following operations
__A ,__A : Any = self.pad(_UpperCAmelCase)
            # size-divisibility padding is not implemented
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # per-image (y, x) rescale factors relative to the original sizes
            __A : str = torch.true_divide(_UpperCAmelCase , _UpperCAmelCase)
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def _lowerCAmelCase ( __snake_case : Dict , __snake_case : str ) -> Dict:
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
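# Note on the slicing above: boxes are (x1, y1, x2, y2), so columns 0 and 2 (the x
# coordinates) are scaled by the width factor and columns 1 and 3 (the y coordinates)
# by the height factor, matching the (y, x) order of scale_yx.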
def _lowerCAmelCase ( __snake_case : List[str] , __snake_case : Tuple[int, int] ) -> int:
assert torch.isfinite(__snake_case ).all(), "Box tensor contains infinite or NaN!"
__A ,__A : int = box_size
tensor[:, 0].clamp_(min=0 , max=__snake_case )
tensor[:, 1].clamp_(min=0 , max=__snake_case )
tensor[:, 2].clamp_(min=0 , max=__snake_case )
tensor[:, 3].clamp_(min=0 , max=__snake_case ) | 8 | 1 |
'''simple docstring'''
import os
from pathlib import Path
def _lowerCAmelCase ( ) -> int:
from torch.utils.cpp_extension import load
__A : Tuple = Path(__snake_case ).resolve().parent.parent.parent / 'kernels' / 'deformable_detr'
__A : Any = [
root / filename
for filename in [
'vision.cpp',
os.path.join('cpu' , 'ms_deform_attn_cpu.cpp' ),
os.path.join('cuda' , 'ms_deform_attn_cuda.cu' ),
]
]
load(
'MultiScaleDeformableAttention' , __snake_case , with_cuda=__snake_case , extra_include_paths=[str(__snake_case )] , extra_cflags=['-DWITH_CUDA=1'] , extra_cuda_cflags=[
'-DCUDA_HAS_FP16=1',
'-D__CUDA_NO_HALF_OPERATORS__',
'-D__CUDA_NO_HALF_CONVERSIONS__',
'-D__CUDA_NO_HALF2_OPERATORS__',
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA | 8 |
'''simple docstring'''
def _lowerCAmelCase ( __snake_case : Optional[Any] ) -> Optional[Any]: # noqa: E741
__A : Tuple = len(__snake_case )
__A : Optional[int] = 0
__A : str = [0] * n
__A : int = [False] * n
__A : Tuple = [False] * n
def dfs(__snake_case : List[str] , __snake_case : List[Any] , __snake_case : Optional[Any] , __snake_case : int ):
if parent == root:
out_edge_count += 1
__A : str = True
__A : Tuple = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
__A : Optional[int] = dfs(__snake_case , __snake_case , __snake_case , __snake_case )
__A : int = min(low[at] , low[to] )
# AP found via bridge
if at < low[to]:
__A : Tuple = True
# AP found via cycle
if at == low[to]:
__A : Optional[Any] = True
else:
__A : Any = min(low[at] , __snake_case )
return out_edge_count
for i in range(__snake_case ):
if not visited[i]:
__A : Tuple = 0
__A : List[Any] = dfs(__snake_case , __snake_case , -1 , __snake_case )
__A : Union[str, Any] = out_edge_count > 1
for x in range(len(__snake_case ) ):
if is_art[x] is True:
print(__snake_case )
# Adjacency list of graph
lowercase__ : Tuple = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data) | 8 | 1 |
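# Worked example for the graph above: removing vertex 2 separates {0, 1} from the rest,
# removing vertex 3 isolates vertex 4, and removing vertex 5 cuts off the cycle
# {6, 7, 8}, so compute_ap(data) prints the articulation points 2, 3 and 5.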
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : str = inspect.getfile(accelerate.test_utils)
__A : Any = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_script.py'])
__A : List[str] = os.path.sep.join(
mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_distributed_data_loop.py'])
__A : List[Any] = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_ops.py'])
@require_multi_gpu
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
print(F'Found {torch.cuda.device_count()} devices.')
__A : Optional[Any] = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', self.test_file_path]
with patch_environment(omp_num_threads=1):
execute_subprocess_async(_UpperCAmelCase , env=os.environ.copy())
@require_multi_gpu
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
print(F'Found {torch.cuda.device_count()} devices.')
__A : Dict = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', self.operation_file_path]
print(F'Command: {cmd}')
with patch_environment(omp_num_threads=1):
execute_subprocess_async(_UpperCAmelCase , env=os.environ.copy())
@require_multi_gpu
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__)]
with patch_environment(omp_num_threads=1):
execute_subprocess_async(_UpperCAmelCase , env=os.environ.copy())
@require_multi_gpu
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
print(F'Found {torch.cuda.device_count()} devices, using 2 devices only')
__A : Optional[int] = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices='0,1'):
execute_subprocess_async(_UpperCAmelCase , env=os.environ.copy())
if __name__ == "__main__":
lowercase__ : List[Any] = Accelerator()
lowercase__ : Optional[int] = (accelerator.state.process_index + 2, 10)
lowercase__ : Optional[int] = torch.randint(0, 10, shape).to(accelerator.device)
lowercase__ : Optional[Any] = ''''''
lowercase__ : Dict = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
lowercase__ : Any = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
lowercase__ : str = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg) | 8 |
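# Worked example (illustrative, 2 processes): process 0 holds a (2, 10) tensor and
# process 1 a (3, 10) tensor, so pad_across_processes brings both to (3, 10) --
# zero-padded at the end by default, or at the front when pad_first=True.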
'''simple docstring'''
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
lowercase__ : Union[str, Any] = logging.get_logger(__name__)
lowercase__ : int = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn.grep_linear''': '''encoder.layers.*.attention.gru_rel_pos_linear''',
'''self_attn.relative_attention_bias''': '''encoder.layers.*.attention.rel_attn_embed''',
'''self_attn.grep_a''': '''encoder.layers.*.attention.gru_rel_pos_const''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
lowercase__ : Dict = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def _lowerCAmelCase ( __snake_case : int , __snake_case : Union[str, Any] , __snake_case : Tuple , __snake_case : Any , __snake_case : List[str] ) -> Union[str, Any]:
for attribute in key.split('.' ):
__A : int = getattr(__snake_case , __snake_case )
if weight_type is not None:
__A : Optional[int] = getattr(__snake_case , __snake_case ).shape
else:
__A : List[str] = hf_pointer.shape
assert hf_shape == value.shape, (
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}'
)
if weight_type == "weight":
__A : Tuple = value
elif weight_type == "weight_g":
__A : Union[str, Any] = value
elif weight_type == "weight_v":
__A : Optional[Any] = value
elif weight_type == "bias":
__A : Optional[int] = value
else:
__A : Optional[int] = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def _lowerCAmelCase ( __snake_case : Any , __snake_case : List[str] ) -> List[Any]:
__A : Optional[Any] = []
__A : Any = fairseq_model.state_dict()
__A : Union[str, Any] = hf_model.feature_extractor
for name, value in fairseq_dict.items():
__A : Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
__snake_case , __snake_case , __snake_case , __snake_case , hf_model.config.feat_extract_norm == 'group' , )
__A : Optional[Any] = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
__A : int = True
if "*" in mapped_key:
__A : Any = name.split(__snake_case )[0].split('.' )[-2]
__A : List[Any] = mapped_key.replace('*' , __snake_case )
if "weight_g" in name:
__A : Optional[Any] = 'weight_g'
elif "weight_v" in name:
__A : Union[str, Any] = 'weight_v'
elif "bias" in name and "relative_attention_bias" not in name:
__A : Optional[Any] = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__A : Tuple = 'weight'
else:
__A : Dict = None
set_recursively(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
continue
if not is_used:
unused_weights.append(__snake_case )
logger.warning(f'Unused weights: {unused_weights}' )
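# Worked example of the "*" substitution above (illustrative): a fairseq key such as
# "encoder.layers.3.self_attn.k_proj.weight" matches the MAPPING entry
# "self_attn.k_proj" -> "encoder.layers.*.attention.k_proj"; the layer index "3" is
# recovered from the name, giving the HF key "encoder.layers.3.attention.k_proj"
# with weight_type "weight".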
def _lowerCAmelCase ( __snake_case : List[str] , __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : Optional[int] ) -> int:
__A : int = full_name.split('conv_layers.' )[-1]
__A : List[str] = name.split('.' )
__A : Optional[int] = int(items[0] )
__A : str = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
__A : Optional[int] = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
__A : Union[str, Any] = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
__A : Dict = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
__A : Any = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(__snake_case )
@torch.no_grad()
def _lowerCAmelCase ( __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : Tuple=None ) -> Any:
    # load the pre-trained checkpoint
__A : List[str] = torch.load(__snake_case )
__A : Dict = WavLMConfigOrig(checkpoint['cfg'] )
__A : Optional[int] = WavLMOrig(__snake_case )
model.load_state_dict(checkpoint['model'] )
model.eval()
if config_path is not None:
__A : List[Any] = WavLMConfig.from_pretrained(__snake_case )
else:
__A : Dict = WavLMConfig()
__A : Optional[Any] = WavLMModel(__snake_case )
recursively_load_weights(__snake_case , __snake_case )
hf_wavlm.save_pretrained(__snake_case )
if __name__ == "__main__":
lowercase__ : Tuple = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
lowercase__ : Any = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path) | 8 | 1 |
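# A minimal invocation sketch for the conversion entry point above. The flag
# names come from the argparse definitions in this file; the script filename
# and both paths are placeholders, not the upstream names.
#
#   python convert_wavlm_checkpoint.py \
#       --checkpoint_path /path/to/wavlm_base.pt \
#       --pytorch_dump_folder_path ./wavlm-converted \
#       --config_path ./config.json  # optional; if omitted, a default WavLMConfig is used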
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
lowercase__ : Optional[Any] = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = '''sequence-classification'''
def __init__( self , _UpperCAmelCase):
'''simple docstring'''
if type(_UpperCAmelCase) == dict:
__A : List[str] = Namespace(**_UpperCAmelCase)
__A : str = glue_output_modes[hparams.task]
__A : str = glue_tasks_num_labels[hparams.task]
super().__init__(_UpperCAmelCase , _UpperCAmelCase , self.mode)
def SCREAMING_SNAKE_CASE ( self , **_UpperCAmelCase):
'''simple docstring'''
return self.model(**_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : int = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
__A : Any = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
__A : str = self(**_UpperCAmelCase)
__A : Optional[int] = outputs[0]
__A : Optional[int] = self.trainer.lr_schedulers[0]['scheduler']
__A : Tuple = {'loss': loss, 'rate': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = self.hparams
__A : str = processors[args.task]()
__A : Tuple = processor.get_labels()
for mode in ["train", "dev"]:
__A : Optional[int] = self._feature_file(_UpperCAmelCase)
if os.path.exists(_UpperCAmelCase) and not args.overwrite_cache:
logger.info('Loading features from cached file %s' , _UpperCAmelCase)
else:
logger.info('Creating features from dataset file at %s' , args.data_dir)
__A : Dict = (
processor.get_dev_examples(args.data_dir)
if mode == 'dev'
else processor.get_train_examples(args.data_dir)
)
__A : List[str] = convert_examples_to_features(
_UpperCAmelCase , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info('Saving features into cached file %s' , _UpperCAmelCase)
torch.save(_UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = False):
'''simple docstring'''
__A : Dict = 'dev' if mode == 'test' else mode
__A : Any = self._feature_file(_UpperCAmelCase)
logger.info('Loading features from cached file %s' , _UpperCAmelCase)
__A : Optional[int] = torch.load(_UpperCAmelCase)
__A : Any = torch.tensor([f.input_ids for f in features] , dtype=torch.long)
__A : List[Any] = torch.tensor([f.attention_mask for f in features] , dtype=torch.long)
__A : Any = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long)
if self.hparams.glue_output_mode == "classification":
__A : int = torch.tensor([f.label for f in features] , dtype=torch.long)
elif self.hparams.glue_output_mode == "regression":
__A : List[str] = torch.tensor([f.label for f in features] , dtype=torch.float)
return DataLoader(
TensorDataset(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) , batch_size=_UpperCAmelCase , shuffle=_UpperCAmelCase , )
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : List[str] = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
__A : Any = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
__A : str = self(**_UpperCAmelCase)
__A ,__A : Optional[int] = outputs[:2]
__A : Dict = logits.detach().cpu().numpy()
__A : Union[str, Any] = inputs['labels'].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A : List[str] = torch.stack([x['val_loss'] for x in outputs]).mean().detach().cpu().item()
__A : Optional[Any] = np.concatenate([x['pred'] for x in outputs] , axis=0)
if self.hparams.glue_output_mode == "classification":
__A : str = np.argmax(_UpperCAmelCase , axis=1)
elif self.hparams.glue_output_mode == "regression":
__A : Any = np.squeeze(_UpperCAmelCase)
__A : Union[str, Any] = np.concatenate([x['target'] for x in outputs] , axis=0)
__A : List[Any] = [[] for _ in range(out_label_ids.shape[0])]
__A : List[str] = [[] for _ in range(out_label_ids.shape[0])]
__A : int = {**{'val_loss': val_loss_mean}, **compute_metrics(self.hparams.task , _UpperCAmelCase , _UpperCAmelCase)}
__A : Optional[Any] = dict(results.items())
__A : Tuple = results
return ret, preds_list, out_label_list
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A ,__A ,__A : str = self._eval_end(_UpperCAmelCase)
__A : int = ret['log']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A ,__A ,__A : List[str] = self._eval_end(_UpperCAmelCase)
__A : Union[str, Any] = ret['log']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
BaseTransformer.add_model_specific_args(_UpperCAmelCase , _UpperCAmelCase)
parser.add_argument(
'--max_seq_length' , default=128 , type=_UpperCAmelCase , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--task' , default='' , type=_UpperCAmelCase , required=_UpperCAmelCase , help='The GLUE task to run' , )
parser.add_argument(
'--gpus' , default=0 , type=_UpperCAmelCase , help='The number of GPUs allocated for this, it is by default 0 meaning none' , )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets')
return parser
def _lowerCAmelCase ( ) -> List[Any]:
__A : str = argparse.ArgumentParser()
add_generic_args(__snake_case , os.getcwd() )
__A : Union[str, Any] = GLUETransformer.add_model_specific_args(__snake_case , os.getcwd() )
__A : Any = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
__A : Optional[int] = os.path.join(
'./results' , f'{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}' , )
os.makedirs(args.output_dir )
__A : int = GLUETransformer(__snake_case )
__A : int = generic_train(__snake_case , __snake_case )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
__A : Union[str, Any] = sorted(glob.glob(os.path.join(args.output_dir , 'checkpoint-epoch=*.ckpt' ) , recursive=__snake_case ) )
__A : Tuple = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(__snake_case )
if __name__ == "__main__":
main() | 8 |
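# A launch sketch for the Lightning GLUE trainer above. Only --max_seq_length,
# --task, --gpus and --overwrite_cache are defined in this file; the remaining
# flags are assumed to come from add_generic_args in lightning_base and may be
# named differently in your checkout.
#
#   python run_pl_glue.py \
#       --model_name_or_path bert-base-cased \
#       --task mrpc \
#       --data_dir ./glue_data/MRPC \
#       --max_seq_length 128 \
#       --gpus 1 --do_predict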
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = 42
class SCREAMING_SNAKE_CASE (a__ , a__ ):
@register_to_config
def __init__( self , _UpperCAmelCase = 6_5536 , _UpperCAmelCase = None , _UpperCAmelCase = 2 , _UpperCAmelCase = 2 , _UpperCAmelCase = 0 , _UpperCAmelCase = "fourier" , _UpperCAmelCase = True , _UpperCAmelCase = False , _UpperCAmelCase = 0.0 , _UpperCAmelCase = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , _UpperCAmelCase = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , _UpperCAmelCase = "UNetMidBlock1D" , _UpperCAmelCase = None , _UpperCAmelCase = (32, 32, 64) , _UpperCAmelCase = None , _UpperCAmelCase = 8 , _UpperCAmelCase = 1 , _UpperCAmelCase = False , ):
'''simple docstring'''
super().__init__()
__A : Dict = sample_size
# time
if time_embedding_type == "fourier":
__A : int = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=_UpperCAmelCase , log=_UpperCAmelCase , flip_sin_to_cos=_UpperCAmelCase)
__A : Any = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
__A : List[str] = Timesteps(
block_out_channels[0] , flip_sin_to_cos=_UpperCAmelCase , downscale_freq_shift=_UpperCAmelCase)
__A : List[str] = block_out_channels[0]
if use_timestep_embedding:
__A : Optional[Any] = block_out_channels[0] * 4
__A : Optional[int] = TimestepEmbedding(
in_channels=_UpperCAmelCase , time_embed_dim=_UpperCAmelCase , act_fn=_UpperCAmelCase , out_dim=block_out_channels[0] , )
__A : Dict = nn.ModuleList([])
__A : Dict = None
__A : Tuple = nn.ModuleList([])
__A : Tuple = None
# down
__A : Any = in_channels
for i, down_block_type in enumerate(_UpperCAmelCase):
__A : Tuple = output_channel
__A : Optional[Any] = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
__A : List[str] = i == len(_UpperCAmelCase) - 1
__A : int = get_down_block(
_UpperCAmelCase , num_layers=_UpperCAmelCase , in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(_UpperCAmelCase)
# mid
__A : str = get_mid_block(
_UpperCAmelCase , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=_UpperCAmelCase , add_downsample=_UpperCAmelCase , )
# up
__A : Optional[int] = list(reversed(_UpperCAmelCase))
__A : Optional[int] = reversed_block_out_channels[0]
if out_block_type is None:
__A : str = out_channels
else:
__A : List[Any] = block_out_channels[0]
for i, up_block_type in enumerate(_UpperCAmelCase):
__A : Optional[Any] = output_channel
__A : Optional[Any] = (
reversed_block_out_channels[i + 1] if i < len(_UpperCAmelCase) - 1 else final_upsample_channels
)
__A : Dict = i == len(_UpperCAmelCase) - 1
__A : str = get_up_block(
_UpperCAmelCase , num_layers=_UpperCAmelCase , in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(_UpperCAmelCase)
__A : Optional[int] = output_channel
# out
__A : str = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32)
__A : Optional[Any] = get_out_block(
out_block_type=_UpperCAmelCase , num_groups_out=_UpperCAmelCase , embed_dim=block_out_channels[0] , out_channels=_UpperCAmelCase , act_fn=_UpperCAmelCase , fc_dim=block_out_channels[-1] // 4 , )
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = True , ):
'''simple docstring'''
__A : Any = timestep
if not torch.is_tensor(_UpperCAmelCase):
__A : Any = torch.tensor([timesteps] , dtype=torch.long , device=sample.device)
elif torch.is_tensor(_UpperCAmelCase) and len(timesteps.shape) == 0:
__A : Any = timesteps[None].to(sample.device)
__A : List[Any] = self.time_proj(_UpperCAmelCase)
if self.config.use_timestep_embedding:
__A : Dict = self.time_mlp(_UpperCAmelCase)
else:
__A : Dict = timestep_embed[..., None]
__A : Tuple = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
__A : List[Any] = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))
# 2. down
__A : int = ()
for downsample_block in self.down_blocks:
__A ,__A : int = downsample_block(hidden_states=_UpperCAmelCase , temb=_UpperCAmelCase)
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
__A : Optional[int] = self.mid_block(_UpperCAmelCase , _UpperCAmelCase)
# 4. up
for i, upsample_block in enumerate(self.up_blocks):
__A : Any = down_block_res_samples[-1:]
__A : Optional[int] = down_block_res_samples[:-1]
__A : Any = upsample_block(_UpperCAmelCase , res_hidden_states_tuple=_UpperCAmelCase , temb=_UpperCAmelCase)
# 5. post-process
if self.out_block:
__A : Dict = self.out_block(_UpperCAmelCase , _UpperCAmelCase)
if not return_dict:
return (sample,)
return UNetaDOutput(sample=_UpperCAmelCase) | 8 | 1 |
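# A shape sketch for the 1D UNet above (exposed as UNet1DModel in diffusers).
# The forward pass takes `sample` of shape (batch, in_channels, length) plus a
# timestep and returns an output whose .sample has shape
# (batch, out_channels, length). The block configuration below mirrors the
# dance-diffusion style defaults and is an assumption, not this file's API:
#
# model = UNet1DModel(sample_size=65536, in_channels=2, out_channels=2, extra_in_channels=16)
# out = model(torch.randn(1, 2, 65536), timestep=torch.tensor([10])).sample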
'''simple docstring'''
def dodecahedron_surface_area(edge: float) -> float:
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError('Length must be a positive number.')
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def dodecahedron_volume(edge: float) -> float:
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError('Length must be a positive number.')
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 |
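# Worked example for the two formulas above with edge = 5:
# surface area = 3 * sqrt(25 + 10*sqrt(5)) * 5**2  ~ 516.14
# volume       = (15 + 7*sqrt(5)) / 4 * 5**3       ~ 957.89
#
# >>> round(dodecahedron_surface_area(5), 2)
# 516.14
# >>> round(dodecahedron_volume(5), 2)
# 957.89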
'''simple docstring'''
def hamming_distance(string_a: str, string_b: str) -> int:
    if len(string_a) != len(string_b):
        raise ValueError('String lengths must match!')
    count = 0
    for char_a, char_b in zip(string_a, string_b):
        if char_a != char_b:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 | 1 |
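# Usage sketch for hamming_distance above: "karolin" and "kathrin" differ in
# exactly three positions, and mismatched lengths raise ValueError.
#
# >>> hamming_distance('karolin', 'kathrin')
# 3
# >>> hamming_distance('0000', '1111')
# 4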
'''simple docstring'''
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
lowercase__ : List[str] = getLogger(__name__)
lowercase__ : List[Any] = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def _lowerCAmelCase ( __snake_case : List[str] , __snake_case : str , __snake_case : str , __snake_case : int = 8 , __snake_case : str = DEFAULT_DEVICE , __snake_case : Union[str, Any]=False , __snake_case : Optional[int]="summarization" , __snake_case : Dict=None , **__snake_case : List[str] , ) -> Dict:
__A : Any = Path(__snake_case ).open('w' , encoding='utf-8' )
__A : List[str] = str(__snake_case )
__A : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(__snake_case ).to(__snake_case )
if fpaa:
__A : Tuple = model.half()
__A : Dict = AutoTokenizer.from_pretrained(__snake_case )
logger.info(f'Inferred tokenizer type: {tokenizer.__class__}' ) # if this is wrong, check config.model_type.
__A : List[str] = time.time()
# update config with task specific params
use_task_specific_params(__snake_case , __snake_case )
if prefix is None:
__A : Dict = prefix or getattr(model.config , 'prefix' , '' ) or ''
for examples_chunk in tqdm(list(chunks(__snake_case , __snake_case ) ) ):
__A : Tuple = [prefix + text for text in examples_chunk]
__A : List[Any] = tokenizer(__snake_case , return_tensors='pt' , truncation=__snake_case , padding='longest' ).to(__snake_case )
__A : Any = model.generate(
input_ids=batch.input_ids , attention_mask=batch.attention_mask , **__snake_case , )
__A : Dict = tokenizer.batch_decode(__snake_case , skip_special_tokens=__snake_case , clean_up_tokenization_spaces=__snake_case )
for hypothesis in dec:
fout.write(hypothesis + '\n' )
fout.flush()
fout.close()
__A : List[str] = int(time.time() - start_time ) # seconds
__A : Dict = len(__snake_case )
return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
def _lowerCAmelCase ( ) -> Optional[int]:
return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S' )
def _lowerCAmelCase ( __snake_case : Tuple=True ) -> int:
__A : Any = argparse.ArgumentParser()
parser.add_argument('model_name' , type=__snake_case , help='like facebook/bart-large-cnn,t5-base, etc.' )
parser.add_argument('input_path' , type=__snake_case , help='like cnn_dm/test.source' )
parser.add_argument('save_path' , type=__snake_case , help='where to save summaries' )
parser.add_argument('--reference_path' , type=__snake_case , required=__snake_case , help='like cnn_dm/test.target' )
parser.add_argument('--score_path' , type=__snake_case , required=__snake_case , default='metrics.json' , help='where to save metrics' )
parser.add_argument('--device' , type=__snake_case , required=__snake_case , default=__snake_case , help='cuda, cuda:1, cpu etc.' )
parser.add_argument(
        '--prefix' , type=__snake_case , required=__snake_case , default=__snake_case , help='will be added to the beginning of src examples' )
parser.add_argument('--task' , type=__snake_case , default='summarization' , help='used for task_specific_params + metrics' )
parser.add_argument('--bs' , type=__snake_case , default=8 , required=__snake_case , help='batch size' )
parser.add_argument(
'--n_obs' , type=__snake_case , default=-1 , required=__snake_case , help='How many observations. Defaults to all.' )
parser.add_argument('--fp16' , action='store_true' )
parser.add_argument('--dump-args' , action='store_true' , help='print the custom hparams with the results' )
parser.add_argument(
'--info' , nargs='?' , type=__snake_case , const=datetime_now() , help=(
'use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'
' lang=en-ru. If no value is passed, the current datetime string will be used.'
) , )
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
__A ,__A : Optional[Any] = parser.parse_known_args()
__A : List[str] = parse_numeric_n_bool_cl_kwargs(__snake_case )
if parsed_args and verbose:
print(f'parsed the following generate kwargs: {parsed_args}' )
__A : List[Any] = [' ' + x.rstrip() if 't5' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
if args.n_obs > 0:
__A : Optional[Any] = examples[: args.n_obs]
Path(args.save_path ).parent.mkdir(exist_ok=__snake_case )
if args.reference_path is None and Path(args.score_path ).exists():
warnings.warn(f'score_path {args.score_path} will be overwritten unless you type ctrl-c.' )
if args.device == "cpu" and args.fpaa:
# this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
raise ValueError('Can\'t mix --fp16 and --device cpu' )
__A : Union[str, Any] = generate_summaries_or_translations(
        __snake_case , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fp16 , task=args.task , prefix=args.prefix , **__snake_case , )
if args.reference_path is None:
return {}
# Compute scores
__A : Dict = calculate_bleu if 'translation' in args.task else calculate_rouge
__A : Dict = [x.rstrip() for x in open(args.save_path ).readlines()]
__A : List[str] = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(__snake_case )]
__A : dict = score_fn(__snake_case , __snake_case )
scores.update(__snake_case )
if args.dump_args:
scores.update(__snake_case )
if args.info:
__A : Union[str, Any] = args.info
if verbose:
print(__snake_case )
if args.score_path is not None:
json.dump(__snake_case , open(args.score_path , 'w' ) )
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True) | 8 |
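# The MT usage above generalizes to summarization; the paths are placeholders
# and every flag below is defined in this file's argparse setup (ROUGE is used
# automatically when --task is not a translation task):
# python run_eval.py facebook/bart-large-cnn $DATA_DIR/test.source $save_dir/test_summaries.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/rouge.json --task summarization --bs 16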
'''simple docstring'''
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : Tuple = logging.get_logger(__name__)
def _lowerCAmelCase ( __snake_case : str , __snake_case : str ) -> Union[str, Any]:
__A : int = RobertaPreLayerNormConfig.from_pretrained(
__snake_case , architectures=['RobertaPreLayerNormForMaskedLM'] )
# convert state_dict
__A : Tuple = torch.load(hf_hub_download(repo_id=__snake_case , filename='pytorch_model.bin' ) )
__A : str = {}
for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
if tensor_key.startswith('roberta.' ):
__A : Dict = 'roberta_prelayernorm.' + tensor_key[len('roberta.' ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith('.self.LayerNorm.weight' ) or tensor_key.endswith('.self.LayerNorm.bias' ):
continue
__A : str = tensor_value
__A : Union[str, Any] = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=__snake_case , config=__snake_case , state_dict=__snake_case )
model.save_pretrained(__snake_case )
# convert tokenizer
__A : List[Any] = AutoTokenizer.from_pretrained(__snake_case )
tokenizer.save_pretrained(__snake_case )
if __name__ == "__main__":
lowercase__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint-repo''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowercase__ : Optional[Any] = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path) | 8 | 1 |
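# A minimal invocation sketch; the repo id is the example from the
# --checkpoint-repo help string, while the script filename and output path are
# placeholders:
#   python convert_roberta_prelayernorm_checkpoint.py \
#       --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#       --pytorch_dump_folder_path ./roberta-prelayernorm-converted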
'''simple docstring'''
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
lowercase__ : Any = '''hf-internal-testing/tiny-random-bert'''
lowercase__ : Optional[Any] = os.path.join(TRANSFORMERS_CACHE, '''models--hf-internal-testing--tiny-random-bert''')
lowercase__ : List[Any] = '''9b8c223d42b2188cb49d29af482996f9d0f3e5a6'''
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase)
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(_UpperCAmelCase))
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(_UpperCAmelCase , _UpperCAmelCase)))
with open(os.path.join(_UpperCAmelCase , 'refs' , 'main')) as f:
__A : Any = f.read()
self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , 'snapshots' , _UpperCAmelCase , _UpperCAmelCase))
self.assertTrue(os.path.isfile(_UpperCAmelCase))
# File is cached at the same place the second time.
__A : Tuple = cached_file(_UpperCAmelCase , _UpperCAmelCase)
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase)
# Using a specific revision to test the full commit hash.
__A : List[Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='9b8c223')
self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , 'snapshots' , _UpperCAmelCase , _UpperCAmelCase))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid model identifier'):
__A : Dict = cached_file('tiny-random-bert' , _UpperCAmelCase)
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid git identifier'):
__A : Optional[int] = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='aaaa')
with self.assertRaisesRegex(_UpperCAmelCase , 'does not appear to have a file named'):
__A : int = cached_file(_UpperCAmelCase , 'conf')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with self.assertRaisesRegex(_UpperCAmelCase , 'does not appear to have a file named'):
__A : Any = cached_file(_UpperCAmelCase , 'conf')
with open(os.path.join(_UpperCAmelCase , 'refs' , 'main')) as f:
__A : Dict = f.read()
self.assertTrue(os.path.isfile(os.path.join(_UpperCAmelCase , '.no_exist' , _UpperCAmelCase , 'conf')))
__A : List[Any] = cached_file(_UpperCAmelCase , 'conf' , _raise_exceptions_for_missing_entries=_UpperCAmelCase)
self.assertIsNone(_UpperCAmelCase)
__A : str = cached_file(_UpperCAmelCase , 'conf' , local_files_only=_UpperCAmelCase , _raise_exceptions_for_missing_entries=_UpperCAmelCase)
self.assertIsNone(_UpperCAmelCase)
__A : List[str] = mock.Mock()
__A : Dict = 500
__A : List[str] = {}
__A : List[Any] = HTTPError
__A : Optional[Any] = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=_UpperCAmelCase) as mock_head:
__A : Dict = cached_file(_UpperCAmelCase , 'conf' , _raise_exceptions_for_connection_errors=_UpperCAmelCase)
self.assertIsNone(_UpperCAmelCase)
            # This check ensures we did call the fake head request
mock_head.assert_called()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.assertTrue(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase))
self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase))
self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.assertIsNone(get_file_from_repo('bert-base-cased' , 'ahah.txt'))
# The function raises if the repository does not exist.
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid model identifier'):
get_file_from_repo('bert-base-case' , _UpperCAmelCase)
# The function raises if the revision does not exist.
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid git identifier'):
get_file_from_repo('bert-base-cased' , _UpperCAmelCase , revision='ahaha')
__A : List[str] = get_file_from_repo('bert-base-cased' , _UpperCAmelCase)
# The name is the cached name which is not very easy to test, so instead we load the content.
__A : List[str] = json.loads(open(_UpperCAmelCase , 'r').read())
self.assertEqual(config['hidden_size'] , 768)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
__A : Tuple = Path(_UpperCAmelCase) / 'a.txt'
filename.touch()
self.assertEqual(get_file_from_repo(_UpperCAmelCase , 'a.txt') , str(_UpperCAmelCase))
self.assertIsNone(get_file_from_repo(_UpperCAmelCase , 'b.txt')) | 8 |
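# A short sketch of the two helpers the tests above exercise, assuming network
# access on first call (the repo id reuses the test constant from this file):
#
# from transformers.utils import cached_file, has_file
# local_path = cached_file('hf-internal-testing/tiny-random-bert', 'config.json')
# has_file('hf-internal-testing/tiny-random-bert', 'pytorch_model.bin')  # True/False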
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
lowercase__ : Dict = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = field(default=a__ , metadata={'''help''': '''Whether to use SortishSampler or not.'''} )
lowerCAmelCase = field(
default=a__ , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
lowerCAmelCase = field(
default=a__ , metadata={
'''help''': (
'''The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `max_length` value of the model configuration.'''
)
} , )
lowerCAmelCase = field(
default=a__ , metadata={
'''help''': (
'''The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `num_beams` value of the model configuration.'''
)
} , )
lowerCAmelCase = field(
default=a__ , metadata={
'''help''': '''Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'''
} , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = super().to_dict()
for k, v in d.items():
if isinstance(_UpperCAmelCase , _UpperCAmelCase):
__A : List[Any] = v.to_dict()
return d | 8 | 1 |
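# A usage sketch, assuming this is Seq2SeqTrainingArguments from transformers;
# the keyword names below match the upstream library (they are elided in this
# snippet) and the output directory is a placeholder:
#
# from transformers import Seq2SeqTrainingArguments
# args = Seq2SeqTrainingArguments(
#     output_dir='./out',
#     predict_with_generate=True,  # evaluate with generate() so ROUGE/BLEU can be computed
#     generation_max_length=128,
#     generation_num_beams=4,
# )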
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = '''dandelin/vilt-b32-finetuned-vqa'''
lowerCAmelCase = (
'''This is a tool that answers a question about an image. It takes an input named `image` which should be the '''
'''image containing the information, as well as a `question` which should be the question in English. It '''
'''returns a text that is the answer to the question.'''
)
lowerCAmelCase = '''image_qa'''
lowerCAmelCase = AutoProcessor
lowerCAmelCase = AutoModelForVisualQuestionAnswering
lowerCAmelCase = ['''image''', '''text''']
lowerCAmelCase = ['''text''']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
requires_backends(self , ['vision'])
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
return self.pre_processor(_UpperCAmelCase , _UpperCAmelCase , return_tensors='pt')
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
with torch.no_grad():
return self.model(**_UpperCAmelCase).logits
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A : Any = outputs.argmax(-1).item()
return self.model.config.idalabel[idx] | 8 |
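# A usage sketch for the tool above: PipelineTool instances are callable and
# lazily download the checkpoint on first use. The positional order (image,
# question) follows the encode step defined in this class; the image path is a
# placeholder.
#
# from PIL import Image
# tool = SCREAMING_SNAKE_CASE()                 # the class defined above
# image = Image.open('photo.png')
# answer = tool(image, 'How many dogs are in the picture?')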
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : List[Any] = logging.get_logger(__name__)
lowercase__ : Optional[int] = {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = '''lxmert'''
lowerCAmelCase = {}
def __init__( self , _UpperCAmelCase=3_0522 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=9500 , _UpperCAmelCase=1600 , _UpperCAmelCase=400 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=9 , _UpperCAmelCase=5 , _UpperCAmelCase=5 , _UpperCAmelCase=2048 , _UpperCAmelCase=4 , _UpperCAmelCase=6.67 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , **_UpperCAmelCase , ):
'''simple docstring'''
__A : Tuple = vocab_size
__A : int = hidden_size
__A : str = num_attention_heads
__A : Tuple = hidden_act
__A : int = intermediate_size
__A : str = hidden_dropout_prob
__A : Optional[int] = attention_probs_dropout_prob
__A : Optional[Any] = max_position_embeddings
__A : Tuple = type_vocab_size
__A : Optional[int] = initializer_range
__A : Any = layer_norm_eps
__A : Optional[Any] = num_qa_labels
__A : Optional[int] = num_object_labels
__A : Any = num_attr_labels
__A : Union[str, Any] = l_layers
__A : Optional[int] = x_layers
__A : List[Any] = r_layers
__A : Tuple = visual_feat_dim
__A : Tuple = visual_pos_dim
__A : Optional[int] = visual_loss_normalizer
__A : int = task_matched
__A : List[Any] = task_mask_lm
__A : Optional[Any] = task_obj_predict
__A : str = task_qa
__A : List[Any] = visual_obj_loss
__A : Optional[Any] = visual_attr_loss
__A : Union[str, Any] = visual_feat_loss
__A : Union[str, Any] = {'vision': r_layers, 'cross_encoder': x_layers, 'language': l_layers}
super().__init__(**_UpperCAmelCase) | 8 | 1 |
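# A configuration sketch, assuming this is LxmertConfig from transformers; the
# keyword names below mirror the upstream __init__ parameters (elided to
# _UpperCAmelCase in this snippet):
#
# from transformers import LxmertConfig
# config = LxmertConfig(hidden_size=768, num_attention_heads=12)
# config.num_qa_labels    # 9500 by default
# config.visual_feat_dim  # 2048 by default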
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase__ : List[Any] = {
'''configuration_resnet''': ['''RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ResNetConfig''', '''ResNetOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : List[Any] = [
'''RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ResNetForImageClassification''',
'''ResNetModel''',
'''ResNetPreTrainedModel''',
'''ResNetBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : List[Any] = [
'''TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFResNetForImageClassification''',
'''TFResNetModel''',
'''TFResNetPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : List[str] = [
'''FlaxResNetForImageClassification''',
'''FlaxResNetModel''',
'''FlaxResNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
lowercase__ : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure) | 8 |
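# A sketch of what the lazy pattern above buys: importing the package stays
# cheap because the heavy torch/tf/flax modules are only imported when an
# attribute is first resolved (assuming this file is the resnet __init__):
#
# import transformers.models.resnet as resnet  # fast, no framework import yet
# cls = resnet.ResNetModel                     # triggers the modeling_resnet import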
'''simple docstring'''
import math
import sys
def minimum_squares_to_represent_number(number: int) -> int:
    if number != int(number):
        raise ValueError('the value of input must be a natural number')
    if number < 0:
        raise ValueError('the value of input must not be a negative number')
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 | 1 |
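# Worked examples for the DP above: 12 = 4 + 4 + 4 needs three squares while
# 13 = 9 + 4 needs two.
#
# >>> minimum_squares_to_represent_number(12)
# 3
# >>> minimum_squares_to_represent_number(13)
# 2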
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
lowerCAmelCase = ViTImageProcessor if is_vision_available() else None
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = (3, 32, 128)
__A : List[str] = tempfile.mkdtemp()
# fmt: off
__A : Any = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# fmt: on
__A : Tuple = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase))))
__A : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(_UpperCAmelCase) + '\n')
__A : Tuple = {
'do_normalize': False,
'do_resize': True,
'image_processor_type': 'ViTImageProcessor',
'resample': 3,
'size': {'height': 32, 'width': 128},
}
__A : Any = os.path.join(self.tmpdirname , _UpperCAmelCase)
with open(self.image_processor_file , 'w' , encoding='utf-8') as fp:
json.dump(_UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , **_UpperCAmelCase):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , **_UpperCAmelCase):
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[Any] = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)
__A : Optional[int] = Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1))
return image_input
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = self.get_tokenizer()
__A : List[Any] = self.get_image_processor()
__A : List[Any] = MgpstrProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase)
processor.save_pretrained(self.tmpdirname)
__A : Any = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_UpperCAmelCase)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab())
self.assertIsInstance(processor.char_tokenizer , _UpperCAmelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor.image_processor , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = self.get_tokenizer()
__A : List[str] = self.get_image_processor()
__A : int = MgpstrProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase)
processor.save_pretrained(self.tmpdirname)
__A : int = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
__A : Optional[Any] = self.get_image_processor(do_normalize=_UpperCAmelCase , padding_value=1.0)
__A : Dict = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_UpperCAmelCase , padding_value=1.0)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.char_tokenizer , _UpperCAmelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = self.get_image_processor()
__A : Any = self.get_tokenizer()
__A : Any = MgpstrProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase)
__A : int = self.prepare_image_inputs()
__A : Tuple = image_processor(_UpperCAmelCase , return_tensors='np')
__A : Optional[Any] = processor(images=_UpperCAmelCase , return_tensors='np')
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = self.get_image_processor()
__A : Any = self.get_tokenizer()
__A : Union[str, Any] = MgpstrProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase)
__A : Any = 'test'
__A : Union[str, Any] = processor(text=_UpperCAmelCase)
__A : Tuple = tokenizer(_UpperCAmelCase)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = self.get_image_processor()
__A : Tuple = self.get_tokenizer()
__A : List[str] = MgpstrProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase)
__A : str = 'test'
__A : Dict = self.prepare_image_inputs()
__A : List[Any] = processor(text=_UpperCAmelCase , images=_UpperCAmelCase)
self.assertListEqual(list(inputs.keys()) , ['pixel_values', 'labels'])
# test if it raises when no input is passed
with pytest.raises(_UpperCAmelCase):
processor()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = self.get_image_processor()
__A : int = self.get_tokenizer()
__A : Optional[int] = MgpstrProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase)
__A : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
__A : str = processor.char_decode(_UpperCAmelCase)
__A : Union[str, Any] = tokenizer.batch_decode(_UpperCAmelCase)
__A : Optional[int] = [seq.replace(' ' , '') for seq in decoded_tok]
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = self.get_image_processor()
__A : Optional[Any] = self.get_tokenizer()
__A : Dict = MgpstrProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase)
__A : int = None
__A : Any = self.prepare_image_inputs()
__A : Union[str, Any] = processor(text=_UpperCAmelCase , images=_UpperCAmelCase)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = self.get_image_processor()
__A : Optional[int] = self.get_tokenizer()
__A : Optional[int] = MgpstrProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase)
__A : Tuple = torch.randn(1 , 27 , 38)
__A : Any = torch.randn(1 , 27 , 5_0257)
__A : List[Any] = torch.randn(1 , 27 , 3_0522)
__A : List[Any] = processor.batch_decode([char_input, bpe_input, wp_input])
self.assertListEqual(list(results.keys()) , ['generated_text', 'scores', 'char_preds', 'bpe_preds', 'wp_preds']) | 8 |
'''simple docstring'''
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)
    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 | 1 |
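# Classic worked example for the greedy routine above: with capacity 50, items
# of value 60/100/120 and weight 10/20/30 are taken whole, whole, then 2/3,
# for 60 + 100 + 120 * (20/30) = 240.
#
# >>> fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
# (240.0, [1, 1, 0.6666666666666666])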
'''simple docstring'''
from __future__ import annotations
solution = []
def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True
def solve(board: list[list[int]], row: int) -> bool:
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False
def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print('Q', end=' ')
            else:
                print('.', end=' ')
        print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('''The total no. of solutions are :''', len(solution)) | 8 |
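# For reference: with n = 4 the backtracking above finds the two mirror
# solutions (queen columns per row: 1,3,0,2 and 2,0,3,1), and with the n = 8
# board built here it prints 92 boards before reporting the count.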
'''simple docstring'''
from __future__ import annotations
import math
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase):
'''simple docstring'''
__A : int = size
        # approximate the overall size of the segment tree for the given input size
__A : Optional[Any] = [0 for i in range(0 , 4 * size)]
        # create an array to store pending lazy updates
__A : Optional[Any] = [0 for i in range(0 , 4 * size)]
__A : str = [0 for i in range(0 , 4 * size)] # flag for lazy update
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
return idx * 2
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
return idx * 2 + 1
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
if left_element == right_element:
__A : List[Any] = a[left_element - 1]
else:
__A : List[str] = (left_element + right_element) // 2
self.build(self.left(_UpperCAmelCase) , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
self.build(self.right(_UpperCAmelCase) , mid + 1 , _UpperCAmelCase , _UpperCAmelCase)
__A : Any = max(
self.segment_tree[self.left(_UpperCAmelCase)] , self.segment_tree[self.right(_UpperCAmelCase)])
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
if self.flag[idx] is True:
__A : Optional[Any] = self.lazy[idx]
__A : Optional[Any] = False
if left_element != right_element:
__A : List[Any] = self.lazy[idx]
__A : Dict = self.lazy[idx]
__A : Tuple = True
__A : Union[str, Any] = True
if right_element < a or left_element > b:
return True
if left_element >= a and right_element <= b:
__A : Optional[int] = val
if left_element != right_element:
__A : Tuple = val
__A : Any = val
__A : Tuple = True
__A : Union[str, Any] = True
return True
__A : str = (left_element + right_element) // 2
self.update(self.left(_UpperCAmelCase) , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
self.update(self.right(_UpperCAmelCase) , mid + 1 , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
__A : int = max(
self.segment_tree[self.left(_UpperCAmelCase)] , self.segment_tree[self.right(_UpperCAmelCase)])
return True
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
if self.flag[idx] is True:
__A : Union[str, Any] = self.lazy[idx]
__A : List[str] = False
if left_element != right_element:
__A : Union[str, Any] = self.lazy[idx]
__A : Optional[int] = self.lazy[idx]
__A : str = True
__A : Union[str, Any] = True
if right_element < a or left_element > b:
return -math.inf
if left_element >= a and right_element <= b:
return self.segment_tree[idx]
__A : Any = (left_element + right_element) // 2
__A : int = self.query(self.left(_UpperCAmelCase) , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
__A : Union[str, Any] = self.query(self.right(_UpperCAmelCase) , mid + 1 , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
return max(_UpperCAmelCase , _UpperCAmelCase)
def __str__( self):
'''simple docstring'''
return str([self.query(1 , 1 , self.size , _UpperCAmelCase , _UpperCAmelCase) for i in range(1 , self.size + 1)])
if __name__ == "__main__":
lowercase__ : Union[str, Any] = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
lowercase__ : str = 15
lowercase__ : List[Any] = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 1_11)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 2_35)
print(segt) | 8 | 1 |
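# Complexity note for the structure above: with the lazy/flag arrays, both the
# range-assignment update and the range-max query touch O(log n) nodes per
# call; eagerly pushing an assignment down to every leaf would instead cost up
# to O(n log n) per update in the worst case.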
'''simple docstring'''
import torch
from diffusers import DiffusionPipeline
class SCREAMING_SNAKE_CASE (a__ ):
def __init__( self , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
super().__init__()
self.register_modules(unet=_UpperCAmelCase , scheduler=_UpperCAmelCase)
def __call__( self):
'''simple docstring'''
__A : int = torch.randn(
(1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
__A : Dict = 1
__A : int = self.unet(_UpperCAmelCase , _UpperCAmelCase).sample
__A : Any = self.scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase).prev_sample
__A : int = scheduler_output - scheduler_output + torch.ones_like(_UpperCAmelCase)
return result | 8 |
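# A usage sketch for the toy pipeline above; any UNet2DModel/scheduler pair
# from diffusers with matching config fields should do, and the sizes below
# are arbitrary:
#
# from diffusers import DDPMScheduler, UNet2DModel
# unet = UNet2DModel(sample_size=32, in_channels=3, out_channels=3)
# pipe = SCREAMING_SNAKE_CASE(unet, DDPMScheduler())  # the class defined above
# result = pipe()  # an all-ones tensor shaped like the UNet sample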
'''simple docstring'''
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    # closed-form formula for the sum of an arithmetic series
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    return total
def main() -> None:
    print(sum_of_series(1, 1, 10))
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 | 1 |
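# Worked example for sum_of_series above: 1 + 2 + ... + 10 has first_term = 1,
# common_diff = 1 and num_of_terms = 10, so (10 / 2) * (2 + 9) = 55.0.
#
# >>> sum_of_series(1, 1, 10)
# 55.0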
'''simple docstring'''
import torch
from torch import nn
class SCREAMING_SNAKE_CASE (nn.Module ):
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=1 , _UpperCAmelCase=False):
'''simple docstring'''
super().__init__()
__A : Optional[Any] = n_token
__A : Tuple = d_embed
__A : Dict = d_proj
__A : List[Any] = cutoffs + [n_token]
__A : Optional[int] = [0] + self.cutoffs
__A : Optional[int] = div_val
__A : Any = self.cutoffs[0]
__A : Optional[Any] = len(self.cutoffs) - 1
__A : Union[str, Any] = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
__A : str = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed))
__A : str = nn.Parameter(torch.zeros(self.n_clusters))
__A : Any = nn.ModuleList()
__A : str = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs)):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(_UpperCAmelCase , _UpperCAmelCase)))
else:
self.out_projs.append(_UpperCAmelCase)
self.out_layers.append(nn.Linear(_UpperCAmelCase , _UpperCAmelCase))
else:
for i in range(len(self.cutoffs)):
__A ,__A : List[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
__A : List[str] = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(_UpperCAmelCase , _UpperCAmelCase)))
self.out_layers.append(nn.Linear(_UpperCAmelCase , r_idx - l_idx))
__A : Tuple = keep_order
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
if proj is None:
__A : Any = nn.functional.linear(_UpperCAmelCase , _UpperCAmelCase , bias=_UpperCAmelCase)
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
__A : Optional[int] = nn.functional.linear(_UpperCAmelCase , proj.t().contiguous())
__A : Optional[Any] = nn.functional.linear(_UpperCAmelCase , _UpperCAmelCase , bias=_UpperCAmelCase)
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=False):
'''simple docstring'''
if labels is not None:
# Shift so that tokens < n predict n
__A : Any = hidden[..., :-1, :].contiguous()
__A : int = labels[..., 1:].contiguous()
__A : int = hidden.view(-1 , hidden.size(-1))
__A : Union[str, Any] = labels.view(-1)
if hidden.size(0) != labels.size(0):
raise RuntimeError('Input and labels should have the same size in the batch dimension.')
else:
__A : Optional[int] = hidden.view(-1 , hidden.size(-1))
if self.n_clusters == 0:
__A : int = self._compute_logit(_UpperCAmelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0])
if labels is not None:
__A : str = labels != -100
__A : Dict = torch.zeros_like(_UpperCAmelCase , dtype=hidden.dtype , device=hidden.device)
__A : Dict = (
-nn.functional.log_softmax(_UpperCAmelCase , dim=-1)[mask].gather(1 , labels[mask].unsqueeze(1)).squeeze(1)
)
else:
__A : List[Any] = nn.functional.log_softmax(_UpperCAmelCase , dim=-1)
else:
# construct weights and biases
__A ,__A : Union[str, Any] = [], []
for i in range(len(self.cutoffs)):
if self.div_val == 1:
__A ,__A : Union[str, Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
__A : List[Any] = self.out_layers[0].weight[l_idx:r_idx]
__A : List[Any] = self.out_layers[0].bias[l_idx:r_idx]
else:
__A : Dict = self.out_layers[i].weight
__A : Dict = self.out_layers[i].bias
if i == 0:
__A : str = torch.cat([weight_i, self.cluster_weight] , dim=0)
__A : List[str] = torch.cat([bias_i, self.cluster_bias] , dim=0)
weights.append(_UpperCAmelCase)
biases.append(_UpperCAmelCase)
__A ,__A ,__A : Any = weights[0], biases[0], self.out_projs[0]
__A : str = self._compute_logit(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
__A : Tuple = nn.functional.log_softmax(_UpperCAmelCase , dim=1)
if labels is None:
__A : Dict = hidden.new_empty((head_logit.size(0), self.n_token))
else:
__A : Optional[int] = torch.zeros_like(_UpperCAmelCase , dtype=hidden.dtype , device=hidden.device)
__A : Any = 0
__A : int = [0] + self.cutoffs
for i in range(len(_UpperCAmelCase) - 1):
__A ,__A : List[Any] = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
__A : Tuple = (labels >= l_idx) & (labels < r_idx)
__A : Tuple = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
__A : Optional[int] = labels.index_select(0 , _UpperCAmelCase) - l_idx
__A : Optional[int] = head_logprob.index_select(0 , _UpperCAmelCase)
__A : List[Any] = hidden.index_select(0 , _UpperCAmelCase)
else:
__A : List[str] = hidden
if i == 0:
if labels is not None:
__A : str = head_logprob_i.gather(1 , target_i[:, None]).squeeze(1)
else:
__A : List[str] = head_logprob[:, : self.cutoffs[0]]
else:
__A ,__A ,__A : Tuple = weights[i], biases[i], self.out_projs[i]
__A : Dict = self._compute_logit(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
__A : int = nn.functional.log_softmax(_UpperCAmelCase , dim=1)
__A : List[Any] = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
__A : Optional[int] = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None]).squeeze(1)
else:
__A : List[Any] = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
__A : Dict = logprob_i
if labels is not None:
if (hasattr(self , 'keep_order') and self.keep_order) or keep_order:
out.index_copy_(0 , _UpperCAmelCase , -logprob_i)
else:
out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
offset += logprob_i.size(0)
return out
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
if self.n_clusters == 0:
__A : Tuple = self._compute_logit(_UpperCAmelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0])
return nn.functional.log_softmax(_UpperCAmelCase , dim=-1)
else:
# construct weights and biases
__A ,__A : Tuple = [], []
for i in range(len(self.cutoffs)):
if self.div_val == 1:
__A ,__A : int = self.cutoff_ends[i], self.cutoff_ends[i + 1]
__A : str = self.out_layers[0].weight[l_idx:r_idx]
__A : Optional[int] = self.out_layers[0].bias[l_idx:r_idx]
else:
__A : Optional[int] = self.out_layers[i].weight
__A : Optional[int] = self.out_layers[i].bias
if i == 0:
__A : List[Any] = torch.cat([weight_i, self.cluster_weight] , dim=0)
__A : Union[str, Any] = torch.cat([bias_i, self.cluster_bias] , dim=0)
weights.append(_UpperCAmelCase)
biases.append(_UpperCAmelCase)
__A ,__A ,__A : Optional[int] = weights[0], biases[0], self.out_projs[0]
__A : Any = self._compute_logit(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
__A : Optional[int] = hidden.new_empty((head_logit.size(0), self.n_token))
__A : str = nn.functional.log_softmax(_UpperCAmelCase , dim=1)
__A : Dict = [0] + self.cutoffs
for i in range(len(_UpperCAmelCase) - 1):
__A ,__A : Optional[Any] = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
__A : List[Any] = head_logprob[:, : self.cutoffs[0]]
else:
__A ,__A ,__A : Any = weights[i], biases[i], self.out_projs[i]
__A : str = self._compute_logit(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
__A : Optional[Any] = nn.functional.log_softmax(_UpperCAmelCase , dim=1)
__A : int = head_logprob[:, -i] + tail_logprob_i
__A : Tuple = logprob_i
return out | 8 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , ):
'''simple docstring'''
__A : Optional[int] = parent
__A : str = 13
__A : List[Any] = 7
__A : List[str] = True
__A : str = True
__A : Optional[Any] = True
__A : int = True
__A : Dict = 99
__A : Dict = 384
__A : Any = 2
__A : int = 4
__A : Optional[Any] = 37
__A : Optional[int] = 'gelu'
__A : Dict = 0.1
__A : Optional[int] = 0.1
__A : Any = 512
__A : int = 16
__A : List[str] = 2
__A : str = 0.02
__A : Any = 3
__A : str = 4
__A : Union[str, Any] = 128
__A : int = 2
__A : List[Any] = 9
__A : List[Any] = 1
__A : List[Any] = None
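        # Note: the keyword arguments accepted above are ignored - every hyperparameter
        # is pinned to the literal assigned in the body (e.g. hidden_size=384, seq_length=7).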
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__A : str = None
if self.use_input_mask:
__A : List[Any] = random_attention_mask([self.batch_size, self.seq_length])
__A : Optional[Any] = None
if self.use_token_type_ids:
__A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__A : Optional[int] = None
__A : List[str] = None
__A : Dict = None
if self.use_labels:
__A : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__A : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__A : str = ids_tensor([self.batch_size] , self.num_choices)
__A : List[Any] = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_UpperCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : int = TFConvBertModel(config=_UpperCAmelCase)
__A : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__A : Tuple = [input_ids, input_mask]
__A : Any = model(_UpperCAmelCase)
__A : Dict = model(_UpperCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : str = TFConvBertForMaskedLM(config=_UpperCAmelCase)
__A : str = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : str = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Optional[int] = self.num_labels
__A : Any = TFConvBertForSequenceClassification(config=_UpperCAmelCase)
__A : Optional[Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : Dict = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Tuple = self.num_choices
__A : List[str] = TFConvBertForMultipleChoice(config=_UpperCAmelCase)
__A : int = tf.tile(tf.expand_dims(_UpperCAmelCase , 1) , (1, self.num_choices, 1))
__A : Optional[Any] = tf.tile(tf.expand_dims(_UpperCAmelCase , 1) , (1, self.num_choices, 1))
__A : List[Any] = tf.tile(tf.expand_dims(_UpperCAmelCase , 1) , (1, self.num_choices, 1))
__A : int = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__A : Optional[Any] = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : List[Any] = self.num_labels
__A : List[Any] = TFConvBertForTokenClassification(config=_UpperCAmelCase)
__A : str = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : int = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Optional[Any] = TFConvBertForQuestionAnswering(config=_UpperCAmelCase)
__A : Any = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : Union[str, Any] = model(_UpperCAmelCase)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = self.prepare_config_and_inputs()
        __A ,__A ,__A ,__A ,__A ,__A ,__A : Union[str, Any] = config_and_inputs
__A : List[str] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE (a__ , a__ , unittest.TestCase ):
lowerCAmelCase = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCAmelCase = (
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = TFConvBertModelTester(self)
__A : str = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase)
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__A : List[str] = True
__A : List[str] = True
if hasattr(_UpperCAmelCase , 'use_cache'):
__A : List[Any] = True
__A : str = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length)
__A : Union[str, Any] = getattr(self.model_tester , 'key_length' , _UpperCAmelCase)
for model_class in self.all_model_classes:
__A : List[str] = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase)
__A : Optional[int] = model_class(_UpperCAmelCase)
__A : Optional[Any] = len(model(_UpperCAmelCase))
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_UpperCAmelCase , saved_model=_UpperCAmelCase)
__A : Union[str, Any] = os.path.join(_UpperCAmelCase , 'saved_model' , '1')
__A : Tuple = tf.keras.models.load_model(_UpperCAmelCase)
__A : str = model(_UpperCAmelCase)
if self.is_encoder_decoder:
__A : Optional[int] = outputs['encoder_hidden_states']
__A : str = outputs['encoder_attentions']
else:
__A : List[Any] = outputs['hidden_states']
__A : Optional[Any] = outputs['attentions']
self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase)
__A : str = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase)
self.assertListEqual(
list(output_hidden_states[0].shape[-2:]) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
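        # ConvBERT's mixed attention gives half of the heads (head_ratio=2 by default) to
        # span-based dynamic convolution, hence the expected num_attention_heads / 2 maps.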
self.assertListEqual(
list(output_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
self.assertIsNotNone(_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__A : Any = True
__A : str = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length)
__A : Any = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length)
__A : int = getattr(self.model_tester , 'key_length' , _UpperCAmelCase)
__A : Tuple = getattr(self.model_tester , 'key_length' , _UpperCAmelCase)
def check_decoder_attentions_output(_UpperCAmelCase):
__A : List[str] = len(_UpperCAmelCase)
self.assertEqual(out_len % 2 , 0)
__A : Any = outputs.decoder_attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(_UpperCAmelCase):
__A : str = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__A : Dict = True
__A : Any = False
__A : str = model_class(_UpperCAmelCase)
__A : List[str] = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : List[str] = len(_UpperCAmelCase)
self.assertEqual(config.output_hidden_states , _UpperCAmelCase)
check_encoder_attentions_output(_UpperCAmelCase)
if self.is_encoder_decoder:
__A : Union[str, Any] = model_class(_UpperCAmelCase)
__A : int = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
self.assertEqual(config.output_hidden_states , _UpperCAmelCase)
check_decoder_attentions_output(_UpperCAmelCase)
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__A : int = True
__A : Tuple = model_class(_UpperCAmelCase)
__A : Dict = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
self.assertEqual(config.output_hidden_states , _UpperCAmelCase)
check_encoder_attentions_output(_UpperCAmelCase)
# Check attention is always last and order is fine
__A : Any = True
__A : str = True
__A : Union[str, Any] = model_class(_UpperCAmelCase)
__A : Union[str, Any] = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_UpperCAmelCase))
self.assertEqual(model.config.output_hidden_states , _UpperCAmelCase)
check_encoder_attentions_output(_UpperCAmelCase)
@require_tf
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
__A : str = tf.constant([[0, 1, 2, 3, 4, 5]])
__A : Optional[int] = model(_UpperCAmelCase)[0]
__A : List[Any] = [1, 6, 768]
self.assertEqual(output.shape , _UpperCAmelCase)
__A : Tuple = tf.constant(
[
[
[-0.03475493, -0.4686034, -0.30638832],
[0.22637248, -0.26988646, -0.7423424],
[0.10324868, -0.45013508, -0.58280784],
]
])
tf.debugging.assert_near(output[:, :3, :3] , _UpperCAmelCase , atol=1e-4) | 8 | 1 |
'''simple docstring'''
import math
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase=0): # a graph with Node 0,1,...,N-1
'''simple docstring'''
__A : List[str] = n
__A : List[str] = [
[math.inf for j in range(0 , _UpperCAmelCase)] for i in range(0 , _UpperCAmelCase)
] # adjacency matrix for weight
__A : List[str] = [
[math.inf for j in range(0 , _UpperCAmelCase)] for i in range(0 , _UpperCAmelCase)
] # dp[i][j] stores minimum distance from i to j
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Tuple = w
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
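        # Classic O(n^3) Floyd-Warshall relaxation over intermediate vertices k;
        # pairs with no connecting path keep math.inf.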
for k in range(0 , self.n):
for i in range(0 , self.n):
for j in range(0 , self.n):
__A : List[str] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j])
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
return self.dp[u][v]
if __name__ == "__main__":
lowercase__ : Tuple = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3) | 8 |
'''simple docstring'''
import argparse
import os
import re
lowercase__ : Optional[int] = '''src/diffusers'''
# Pattern that looks at the indentation in a line.
lowercase__ : Dict = re.compile(r'''^(\s*)\S''')
# Pattern that matches `"key":" and puts `key` in group 0.
lowercase__ : List[str] = re.compile(r'''^\s*"([^"]+)":''')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
lowercase__ : Tuple = re.compile(r'''^\s*_import_structure\["([^"]+)"\]''')
# Pattern that matches `"key",` and puts `key` in group 0.
lowercase__ : str = re.compile(r'''^\s*"([^"]+)",\s*$''')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
lowercase__ : str = re.compile(r'''\[([^\]]+)\]''')
def _lowerCAmelCase ( __snake_case : str ) -> Tuple:
__A : List[Any] = _re_indent.search(__snake_case )
return "" if search is None else search.groups()[0]
def _lowerCAmelCase ( __snake_case : Optional[Any] , __snake_case : str="" , __snake_case : Any=None , __snake_case : List[Any]=None ) -> Optional[int]:
__A : Tuple = 0
__A : Optional[int] = code.split('\n' )
if start_prompt is not None:
while not lines[index].startswith(__snake_case ):
index += 1
__A : Optional[int] = ['\n'.join(lines[:index] )]
else:
__A : Any = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
__A : Tuple = [lines[index]]
index += 1
while index < len(__snake_case ) and (end_prompt is None or not lines[index].startswith(__snake_case )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(__snake_case ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
current_block.append(lines[index] )
blocks.append('\n'.join(__snake_case ) )
if index < len(__snake_case ) - 1:
__A : Union[str, Any] = [lines[index + 1]]
index += 1
else:
__A : Union[str, Any] = []
else:
blocks.append('\n'.join(__snake_case ) )
__A : Optional[Any] = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(__snake_case ) > 0:
blocks.append('\n'.join(__snake_case ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(__snake_case ):
blocks.append('\n'.join(lines[index:] ) )
return blocks
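# split_code_in_indented_blocks slices the source into blocks of one indentation level
# (optionally between start_prompt and end_prompt) so each block can be re-sorted in place.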
def _lowerCAmelCase ( __snake_case : List[Any] ) -> int:
def _inner(__snake_case : List[Any] ):
return key(__snake_case ).lower().replace('_' , '' )
return _inner
def _lowerCAmelCase ( __snake_case : Dict , __snake_case : Any=None ) -> List[Any]:
# If no key is provided, we use a noop.
def noop(__snake_case : List[Any] ):
return x
if key is None:
__A : Optional[Any] = noop
# Constants are all uppercase, they go first.
__A : str = [obj for obj in objects if key(__snake_case ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
__A : List[str] = [obj for obj in objects if key(__snake_case )[0].isupper() and not key(__snake_case ).isupper()]
# Functions begin with a lowercase, they go last.
__A : str = [obj for obj in objects if not key(__snake_case )[0].isupper()]
__A : Tuple = ignore_underscore(__snake_case )
return sorted(__snake_case , key=__snake_case ) + sorted(__snake_case , key=__snake_case ) + sorted(__snake_case , key=__snake_case )
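# Ordering rule: uppercase constants first, then CamelCase classes, then lowercase
# functions, each group sorted case-insensitively with underscores ignored.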
def _lowerCAmelCase ( __snake_case : Optional[int] ) -> Tuple:
# This inner function sort imports between [ ].
def _replace(__snake_case : Tuple ):
__A : List[str] = match.groups()[0]
if "," not in imports:
return f'[{imports}]'
__A : int = [part.strip().replace('"' , '' ) for part in imports.split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
__A : Dict = keys[:-1]
return "[" + ", ".join([f'"{k}"' for k in sort_objects(__snake_case )] ) + "]"
__A : List[Any] = import_statement.split('\n' )
if len(__snake_case ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
__A : Optional[int] = 2 if lines[1].strip() == '[' else 1
__A : Any = [(i, _re_strip_line.search(__snake_case ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
__A : Optional[int] = sort_objects(__snake_case , key=lambda __snake_case : x[1] )
__A : Any = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(__snake_case ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
__A : Union[str, Any] = _re_bracket_content.sub(_replace , lines[1] )
else:
__A : Dict = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
__A : Tuple = keys[:-1]
__A : List[Any] = get_indent(lines[1] ) + ', '.join([f'"{k}"' for k in sort_objects(__snake_case )] )
return "\n".join(__snake_case )
else:
# Finally we have to deal with imports fitting on one line
__A : Optional[Any] = _re_bracket_content.sub(_replace , __snake_case )
return import_statement
def _lowerCAmelCase ( __snake_case : List[Any] , __snake_case : List[Any]=True ) -> Optional[Any]:
with open(__snake_case , 'r' ) as f:
__A : Dict = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
__A : str = split_code_in_indented_blocks(
__snake_case , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(__snake_case ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
__A : Tuple = main_blocks[block_idx]
__A : int = block.split('\n' )
# Get to the start of the imports.
__A : Tuple = 0
while line_idx < len(__snake_case ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
__A : Optional[int] = len(__snake_case )
else:
line_idx += 1
if line_idx >= len(__snake_case ):
continue
# Ignore beginning and last line: they don't contain anything.
__A : Dict = '\n'.join(block_lines[line_idx:-1] )
__A : int = get_indent(block_lines[1] )
# Slit the internal block into blocks of indent level 1.
__A : Optional[int] = split_code_in_indented_blocks(__snake_case , indent_level=__snake_case )
# We have two categories of import key: list or _import_structure[key].append/extend
__A : Any = _re_direct_key if '_import_structure' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
__A : Dict = [(pattern.search(__snake_case ).groups()[0] if pattern.search(__snake_case ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
__A : Optional[Any] = [(i, key) for i, key in enumerate(__snake_case ) if key is not None]
__A : Tuple = [x[0] for x in sorted(__snake_case , key=lambda __snake_case : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
__A : str = 0
__A : Any = []
for i in range(len(__snake_case ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
__A : str = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(__snake_case )
count += 1
# And we put our main block back together with its first and last line.
__A : int = '\n'.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(__snake_case ):
if check_only:
return True
else:
print(f'Overwriting {file}.' )
with open(__snake_case , 'w' ) as f:
f.write('\n'.join(__snake_case ) )
def _lowerCAmelCase ( __snake_case : int=True ) -> Optional[Any]:
__A : Tuple = []
for root, _, files in os.walk(__snake_case ):
if "__init__.py" in files:
__A : List[Any] = sort_imports(os.path.join(__snake_case , '__init__.py' ) , check_only=__snake_case )
if result:
__A : Dict = [os.path.join(__snake_case , '__init__.py' )]
if len(__snake_case ) > 0:
raise ValueError(f'Would overwrite {len(__snake_case )} files, run `make style`.' )
if __name__ == "__main__":
lowercase__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
lowercase__ : Union[str, Any] = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only) | 8 | 1 |
'''simple docstring'''
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=sys.maxsize):
'''simple docstring'''
__A : Union[str, Any] = 'bilinear'
__A : int = max_size
__A : Optional[Any] = short_edge_length
def __call__( self , _UpperCAmelCase):
'''simple docstring'''
__A : int = []
for img in imgs:
__A ,__A : Dict = img.shape[:2]
# later: provide list and randomly choose index for resize
__A : List[Any] = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1)
if size == 0:
return img
__A : Tuple = size * 1.0 / min(_UpperCAmelCase , _UpperCAmelCase)
if h < w:
__A ,__A : Optional[Any] = size, scale * w
else:
__A ,__A : Optional[Any] = scale * h, size
if max(_UpperCAmelCase , _UpperCAmelCase) > self.max_size:
__A : Tuple = self.max_size * 1.0 / max(_UpperCAmelCase , _UpperCAmelCase)
__A : Tuple = newh * scale
__A : Dict = neww * scale
__A : Dict = int(neww + 0.5)
__A : Optional[int] = int(newh + 0.5)
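            # At this point (newh, neww) is the target size: the short edge is scaled to
            # `size` with the aspect ratio preserved, and both edges are capped at max_size.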
            if img.dtype == np.uint8:
__A : int = Image.fromarray(_UpperCAmelCase)
__A : Optional[int] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR)
__A : Dict = np.asarray(_UpperCAmelCase)
else:
                __A : Optional[Any] = img.permute(2 , 0 , 1).unsqueeze(0)  # hwc -> nchw
__A : Dict = nn.functional.interpolate(
_UpperCAmelCase , (newh, neww) , mode=self.interp_method , align_corners=_UpperCAmelCase).squeeze(0)
img_augs.append(_UpperCAmelCase)
return img_augs
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase):
'''simple docstring'''
__A : List[Any] = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST)
__A : List[Any] = cfg.INPUT.FORMAT
__A : Dict = cfg.SIZE_DIVISIBILITY
__A : str = cfg.PAD_VALUE
__A : Union[str, Any] = cfg.INPUT.MAX_SIZE_TEST
__A : int = cfg.MODEL.DEVICE
__A : Tuple = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1)
__A : Union[str, Any] = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1)
__A : int = lambda _UpperCAmelCase: (x - self.pixel_mean) / self.pixel_std
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
        __A : List[Any] = tuple(max(s) for s in zip(*[img.shape for img in images]))
__A : Dict = [im.shape[-2:] for im in images]
__A : Optional[int] = [
nn.functional.pad(
_UpperCAmelCase , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(_UpperCAmelCase , _UpperCAmelCase)
]
return torch.stack(_UpperCAmelCase), torch.tensor(_UpperCAmelCase)
def __call__( self , _UpperCAmelCase , _UpperCAmelCase=False):
'''simple docstring'''
with torch.no_grad():
if not isinstance(_UpperCAmelCase , _UpperCAmelCase):
__A : int = [images]
if single_image:
assert len(_UpperCAmelCase) == 1
for i in range(len(_UpperCAmelCase)):
if isinstance(images[i] , torch.Tensor):
images.insert(_UpperCAmelCase , images.pop(_UpperCAmelCase).to(self.device).float())
elif not isinstance(images[i] , torch.Tensor):
images.insert(
_UpperCAmelCase , torch.as_tensor(img_tensorize(images.pop(_UpperCAmelCase) , input_format=self.input_format))
.to(self.device)
.float() , )
# resize smallest edge
__A : str = torch.tensor([im.shape[:2] for im in images])
__A : List[str] = self.aug(_UpperCAmelCase)
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
__A : Any = [self.normalizer(_UpperCAmelCase) for x in images]
# now pad them to do the following operations
__A ,__A : Any = self.pad(_UpperCAmelCase)
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
__A : str = torch.true_divide(_UpperCAmelCase , _UpperCAmelCase)
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def _lowerCAmelCase ( __snake_case : Dict , __snake_case : str ) -> Dict:
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
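# Boxes are (x0, y0, x1, y1): even columns are x coordinates scaled by the width factor
# scale_yx[:, 1], odd columns are y coordinates scaled by the height factor scale_yx[:, 0].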
def _lowerCAmelCase ( __snake_case : List[str] , __snake_case : Tuple[int, int] ) -> int:
assert torch.isfinite(__snake_case ).all(), "Box tensor contains infinite or NaN!"
__A ,__A : int = box_size
tensor[:, 0].clamp_(min=0 , max=__snake_case )
tensor[:, 1].clamp_(min=0 , max=__snake_case )
tensor[:, 2].clamp_(min=0 , max=__snake_case )
tensor[:, 3].clamp_(min=0 , max=__snake_case ) | 8 |
'''simple docstring'''
def _lowerCAmelCase ( __snake_case : int ) -> bool:
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
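# A perfect number equals the sum of its proper divisors, e.g. 28 = 1 + 2 + 4 + 7 + 14;
# trial division only needs to scan candidate divisors up to number // 2.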
if __name__ == "__main__":
print('''Program to check whether a number is a Perfect number or not...''')
lowercase__ : int = int(input('''Enter number: ''').strip())
print(f"""{number} is {"" if perfect(number) else "not "}a Perfect Number.""") | 8 | 1 |
'''simple docstring'''
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class SCREAMING_SNAKE_CASE (a__ , a__ ):
@register_to_config
def __init__( self , _UpperCAmelCase = 128 , _UpperCAmelCase = 256 , _UpperCAmelCase = 2000.0 , _UpperCAmelCase = 768 , _UpperCAmelCase = 12 , _UpperCAmelCase = 12 , _UpperCAmelCase = 64 , _UpperCAmelCase = 2048 , _UpperCAmelCase = 0.1 , ):
'''simple docstring'''
super().__init__()
__A : Tuple = nn.Sequential(
nn.Linear(_UpperCAmelCase , d_model * 4 , bias=_UpperCAmelCase) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=_UpperCAmelCase) , nn.SiLU() , )
__A : Optional[Any] = nn.Embedding(_UpperCAmelCase , _UpperCAmelCase)
__A : Dict = False
__A : Any = nn.Linear(_UpperCAmelCase , _UpperCAmelCase , bias=_UpperCAmelCase)
__A : Dict = nn.Dropout(p=_UpperCAmelCase)
__A : Dict = nn.ModuleList()
for lyr_num in range(_UpperCAmelCase):
# FiLM conditional T5 decoder
__A : str = DecoderLayer(d_model=_UpperCAmelCase , d_kv=_UpperCAmelCase , num_heads=_UpperCAmelCase , d_ff=_UpperCAmelCase , dropout_rate=_UpperCAmelCase)
self.decoders.append(_UpperCAmelCase)
__A : Dict = TaLayerNorm(_UpperCAmelCase)
__A : Optional[int] = nn.Dropout(p=_UpperCAmelCase)
__A : int = nn.Linear(_UpperCAmelCase , _UpperCAmelCase , bias=_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : int = torch.mul(query_input.unsqueeze(-1) , key_input.unsqueeze(-2))
return mask.unsqueeze(-3)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A ,__A ,__A : Tuple = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
__A : Union[str, Any] = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype)
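        # The sinusoidal timestep features are lifted by `conditioning_emb` (two Linear
        # layers with SiLU activations) into the 4*d_model vector consumed by FiLM below.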
__A : List[str] = self.conditioning_emb(_UpperCAmelCase).unsqueeze(1)
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
__A : Union[str, Any] = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
__A : List[Any] = torch.broadcast_to(
torch.arange(_UpperCAmelCase , device=decoder_input_tokens.device) , (batch, seq_length) , )
__A : str = self.position_encoding(_UpperCAmelCase)
__A : List[str] = self.continuous_inputs_projection(_UpperCAmelCase)
inputs += position_encodings
__A : str = self.dropout(_UpperCAmelCase)
# decoder: No padding present.
__A : int = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype)
# Translate encoding masks to encoder-decoder masks.
__A : str = [(x, self.encoder_decoder_mask(_UpperCAmelCase , _UpperCAmelCase)) for x, y in encodings_and_masks]
# cross attend style: concat encodings
__A : Union[str, Any] = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1)
__A : Dict = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1)
for lyr in self.decoders:
__A : List[str] = lyr(
_UpperCAmelCase , conditioning_emb=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , )[0]
__A : Optional[Any] = self.decoder_norm(_UpperCAmelCase)
__A : Optional[int] = self.post_dropout(_UpperCAmelCase)
__A : Optional[Any] = self.spec_out(_UpperCAmelCase)
return spec_out
class SCREAMING_SNAKE_CASE (nn.Module ):
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=1e-6):
'''simple docstring'''
super().__init__()
__A : str = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=_UpperCAmelCase , d_kv=_UpperCAmelCase , num_heads=_UpperCAmelCase , dropout_rate=_UpperCAmelCase))
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=_UpperCAmelCase , d_kv=_UpperCAmelCase , num_heads=_UpperCAmelCase , dropout_rate=_UpperCAmelCase , layer_norm_epsilon=_UpperCAmelCase , ))
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=_UpperCAmelCase , d_ff=_UpperCAmelCase , dropout_rate=_UpperCAmelCase , layer_norm_epsilon=_UpperCAmelCase))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , ):
'''simple docstring'''
__A : Union[str, Any] = self.layer[0](
_UpperCAmelCase , conditioning_emb=_UpperCAmelCase , attention_mask=_UpperCAmelCase , )
if encoder_hidden_states is not None:
__A : Optional[Any] = torch.where(encoder_attention_mask > 0 , 0 , -1e1_0).to(
encoder_hidden_states.dtype)
__A : Dict = self.layer[1](
_UpperCAmelCase , key_value_states=_UpperCAmelCase , attention_mask=_UpperCAmelCase , )
# Apply Film Conditional Feed Forward layer
__A : List[Any] = self.layer[-1](_UpperCAmelCase , _UpperCAmelCase)
return (hidden_states,)
class SCREAMING_SNAKE_CASE (nn.Module ):
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
super().__init__()
__A : int = TaLayerNorm(_UpperCAmelCase)
__A : int = TaFiLMLayer(in_features=d_model * 4 , out_features=_UpperCAmelCase)
__A : Optional[Any] = Attention(query_dim=_UpperCAmelCase , heads=_UpperCAmelCase , dim_head=_UpperCAmelCase , out_bias=_UpperCAmelCase , scale_qk=_UpperCAmelCase)
__A : Dict = nn.Dropout(_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None , ):
'''simple docstring'''
__A : List[str] = self.layer_norm(_UpperCAmelCase)
if conditioning_emb is not None:
__A : List[Any] = self.FiLMLayer(_UpperCAmelCase , _UpperCAmelCase)
# Self-attention block
__A : Any = self.attention(_UpperCAmelCase)
__A : Any = hidden_states + self.dropout(_UpperCAmelCase)
return hidden_states
class SCREAMING_SNAKE_CASE (nn.Module ):
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
super().__init__()
__A : Any = Attention(query_dim=_UpperCAmelCase , heads=_UpperCAmelCase , dim_head=_UpperCAmelCase , out_bias=_UpperCAmelCase , scale_qk=_UpperCAmelCase)
__A : List[str] = TaLayerNorm(_UpperCAmelCase , eps=_UpperCAmelCase)
__A : List[Any] = nn.Dropout(_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None , ):
'''simple docstring'''
__A : Tuple = self.layer_norm(_UpperCAmelCase)
__A : str = self.attention(
_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , attention_mask=attention_mask.squeeze(1) , )
__A : List[Any] = hidden_states + self.dropout(_UpperCAmelCase)
return layer_output
class SCREAMING_SNAKE_CASE (nn.Module ):
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
super().__init__()
__A : Any = TaDenseGatedActDense(d_model=_UpperCAmelCase , d_ff=_UpperCAmelCase , dropout_rate=_UpperCAmelCase)
__A : List[Any] = TaFiLMLayer(in_features=d_model * 4 , out_features=_UpperCAmelCase)
__A : int = TaLayerNorm(_UpperCAmelCase , eps=_UpperCAmelCase)
__A : Optional[int] = nn.Dropout(_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase=None):
'''simple docstring'''
__A : Optional[Any] = self.layer_norm(_UpperCAmelCase)
if conditioning_emb is not None:
__A : Optional[int] = self.film(_UpperCAmelCase , _UpperCAmelCase)
__A : Optional[int] = self.DenseReluDense(_UpperCAmelCase)
__A : Any = hidden_states + self.dropout(_UpperCAmelCase)
return hidden_states
class SCREAMING_SNAKE_CASE (nn.Module ):
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
super().__init__()
__A : List[str] = nn.Linear(_UpperCAmelCase , _UpperCAmelCase , bias=_UpperCAmelCase)
__A : List[Any] = nn.Linear(_UpperCAmelCase , _UpperCAmelCase , bias=_UpperCAmelCase)
__A : Optional[int] = nn.Linear(_UpperCAmelCase , _UpperCAmelCase , bias=_UpperCAmelCase)
__A : Tuple = nn.Dropout(_UpperCAmelCase)
__A : Tuple = NewGELUActivation()
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
        __A : str = self.act(self.wi_0(_UpperCAmelCase))
        __A : List[Any] = self.wi_1(_UpperCAmelCase)
__A : int = hidden_gelu * hidden_linear
__A : List[Any] = self.dropout(_UpperCAmelCase)
__A : int = self.wo(_UpperCAmelCase)
return hidden_states
class SCREAMING_SNAKE_CASE (nn.Module ):
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=1e-6):
'''simple docstring'''
super().__init__()
__A : Optional[int] = nn.Parameter(torch.ones(_UpperCAmelCase))
__A : Optional[Any] = eps
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
        __A : str = hidden_states.to(torch.float32).pow(2).mean(-1 , keepdim=_UpperCAmelCase)
__A : str = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
# convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
__A : Optional[Any] = hidden_states.to(self.weight.dtype)
return self.weight * hidden_states
class SCREAMING_SNAKE_CASE (nn.Module ):
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
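        # tanh approximation of GELU, matching the "gelu_new" activation.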
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(_UpperCAmelCase , 3.0))))
class SCREAMING_SNAKE_CASE (nn.Module ):
def __init__( self , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
super().__init__()
__A : Tuple = nn.Linear(_UpperCAmelCase , out_features * 2 , bias=_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
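        # FiLM (feature-wise linear modulation): project the conditioning embedding to a
        # per-channel (scale, shift) pair and apply x * (1 + scale) + shift.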
__A : Dict = self.scale_bias(_UpperCAmelCase)
__A ,__A : str = torch.chunk(_UpperCAmelCase , 2 , -1)
__A : Optional[Any] = x * (1 + scale) + shift
return x | 8 |
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : Tuple = logging.get_logger(__name__)
lowercase__ : str = [
['''attention''', '''attn'''],
['''encoder_attention''', '''encoder_attn'''],
['''q_lin''', '''q_proj'''],
['''k_lin''', '''k_proj'''],
['''v_lin''', '''v_proj'''],
['''out_lin''', '''out_proj'''],
['''norm_embeddings''', '''layernorm_embedding'''],
['''position_embeddings''', '''embed_positions'''],
['''embeddings''', '''embed_tokens'''],
['''ffn.lin''', '''fc'''],
]
def _lowerCAmelCase ( __snake_case : List[Any] ) -> Tuple:
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
__A : Optional[Any] = k.replace(__snake_case , __snake_case )
if k.startswith('encoder' ):
__A : Any = k.replace('.attn' , '.self_attn' )
__A : Any = k.replace('norm1' , 'self_attn_layer_norm' )
__A : str = k.replace('norm2' , 'final_layer_norm' )
elif k.startswith('decoder' ):
__A : Tuple = k.replace('norm1' , 'self_attn_layer_norm' )
__A : str = k.replace('norm2' , 'encoder_attn_layer_norm' )
__A : int = k.replace('norm3' , 'final_layer_norm' )
return k
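# Keys are first rewritten with the ParlAI -> Hugging Face PATTERNS table; the numbered
# layer norms then map differently for the encoder (norm2 -> final_layer_norm) and the
# decoder (norm2 -> encoder_attn_layer_norm, norm3 -> final_layer_norm).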
def _lowerCAmelCase ( __snake_case : List[Any] ) -> Dict:
__A : Optional[int] = [
'model.encoder.layernorm_embedding.weight',
'model.encoder.layernorm_embedding.bias',
'model.decoder.layernorm_embedding.weight',
'model.decoder.layernorm_embedding.bias',
]
for k in keys:
__A : Tuple = sd.pop(__snake_case )
__A : Union[str, Any] = k.replace('layernorm_embedding' , 'layer_norm' )
assert new_k not in sd
__A : str = v
lowercase__ : Tuple = ['''START''']
@torch.no_grad()
def _lowerCAmelCase ( __snake_case : Dict , __snake_case : Any , __snake_case : List[Any] ) -> int:
__A : List[str] = torch.load(__snake_case , map_location='cpu' )
__A : Tuple = model['model']
__A : str = BlenderbotConfig.from_json_file(__snake_case )
__A : int = BlenderbotForConditionalGeneration(__snake_case )
__A : List[Any] = m.model.state_dict().keys()
__A : Optional[int] = []
__A : Optional[int] = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
__A : Union[str, Any] = rename_state_dict_key(__snake_case )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
__A : Optional[Any] = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(__snake_case )
m.model.load_state_dict(__snake_case , strict=__snake_case )
m.half()
m.save_pretrained(__snake_case )
if __name__ == "__main__":
lowercase__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--src_path''', type=str, help='''like blenderbot-model.bin''')
parser.add_argument('''--save_dir''', default='''hf_blenderbot''', type=str, help='''Where to save converted model.''')
parser.add_argument(
'''--hf_config_json''', default='''blenderbot-3b-config.json''', type=str, help='''Path to config to use'''
)
lowercase__ : Optional[Any] = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json) | 8 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase__ : List[str] = {
'''configuration_squeezebert''': [
'''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SqueezeBertConfig''',
'''SqueezeBertOnnxConfig''',
],
'''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''],
}
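# Each try/except block below extends _import_structure only when the optional backend
# (tokenizers, torch) is importable; _LazyModule then defers the actual imports.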
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Optional[int] = ['''SqueezeBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : List[str] = [
'''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SqueezeBertForMaskedLM''',
'''SqueezeBertForMultipleChoice''',
'''SqueezeBertForQuestionAnswering''',
'''SqueezeBertForSequenceClassification''',
'''SqueezeBertForTokenClassification''',
'''SqueezeBertModel''',
'''SqueezeBertModule''',
'''SqueezeBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
lowercase__ : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 8 |
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None):
'''simple docstring'''
__A : List[Any] = list(poly_a or [0])[:]
__A : Optional[int] = list(poly_b or [0])[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
__A : Union[str, Any] = len(self.polyA)
while self.polyB[-1] == 0:
self.polyB.pop()
__A : Optional[int] = len(self.polyB)
# Add 0 to make lengths equal a power of 2
__A : Optional[Any] = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))
while len(self.polyA) < self.c_max_length:
self.polyA.append(0)
while len(self.polyB) < self.c_max_length:
self.polyB.append(0)
# A complex root used for the fourier transform
__A : str = complex(mpmath.root(x=1 , n=self.c_max_length , k=1))
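        # mpmath.root(x=1, n=N, k=1) is exp(2*pi*i/N), the primitive N-th root of unity
        # driving both the forward and inverse transforms below.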
# The product
__A : Tuple = self.__multiply()
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A : Optional[int] = [[x] for x in self.polyA] if which == 'A' else [[x] for x in self.polyB]
# Corner case
if len(_UpperCAmelCase) <= 1:
return dft[0]
#
__A : Dict = self.c_max_length // 2
while next_ncol > 0:
__A : Optional[Any] = [[] for i in range(_UpperCAmelCase)]
__A : Tuple = self.root**next_ncol
# First half of next step
__A : Optional[Any] = 1
for j in range(self.c_max_length // (next_ncol * 2)):
for i in range(_UpperCAmelCase):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
current_root *= root
# Second half of next step
__A : List[str] = 1
for j in range(self.c_max_length // (next_ncol * 2)):
for i in range(_UpperCAmelCase):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
current_root *= root
# Update
__A : Optional[int] = new_dft
__A : Tuple = next_ncol // 2
return dft[0]
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = self.__dft('A')
__A : Optional[Any] = self.__dft('B')
__A : str = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0]) <= 1:
return inverce_c[0]
# Inverse DFT
__A : Dict = 2
while next_ncol <= self.c_max_length:
__A : Optional[int] = [[] for i in range(_UpperCAmelCase)]
__A : Any = self.root ** (next_ncol // 2)
__A : Tuple = 1
# First half of next step
for j in range(self.c_max_length // next_ncol):
for i in range(next_ncol // 2):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2)
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root))
current_root *= root
# Update
__A : int = new_inverse_c
next_ncol *= 2
# Unpack
__A : Optional[int] = [round(x[0].real , 8) + round(x[0].imag , 8) * 1j for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self):
'''simple docstring'''
        __A : int = 'A = ' + ' + '.join(
            F'{coef}*x^{i}' for i, coef in enumerate(self.polyA[: self.len_A]))
        __A : Optional[Any] = 'B = ' + ' + '.join(
            F'{coef}*x^{i}' for i, coef in enumerate(self.polyB[: self.len_B]))
        __A : str = 'A*B = ' + ' + '.join(
            F'{coef}*x^{i}' for i, coef in enumerate(self.product))
return F'{a}\n{b}\n{c}'
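# Quick sanity check (results are rounded to 8 decimals in __multiply): multiplying
# 1 + 2x by 3 + 4x should give 3 + 10x + 8x^2, i.e. a product list of
# [(3+0j), (10+0j), (8+0j)].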
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
lowercase__ : Optional[int] = argparse.ArgumentParser(
description=(
'''Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'''
''' Distillation'''
)
)
parser.add_argument('''--model_type''', default='''bert''', choices=['''bert'''])
parser.add_argument('''--model_name''', default='''bert-base-uncased''', type=str)
parser.add_argument('''--dump_checkpoint''', default='''serialization_dir/tf_bert-base-uncased_0247911.pth''', type=str)
parser.add_argument('''--vocab_transform''', action='''store_true''')
lowercase__ : Dict = parser.parse_args()
if args.model_type == "bert":
lowercase__ : Dict = BertForMaskedLM.from_pretrained(args.model_name)
lowercase__ : int = '''bert'''
else:
raise ValueError('''args.model_type should be "bert".''')
lowercase__ : List[Any] = model.state_dict()
lowercase__ : Optional[Any] = {}
for w in ["word_embeddings", "position_embeddings"]:
lowercase__ : Dict = state_dict[f"""{prefix}.embeddings.{w}.weight"""]
for w in ["weight", "bias"]:
lowercase__ : Any = state_dict[f"""{prefix}.embeddings.LayerNorm.{w}"""]
lowercase__ : List[Any] = 0
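    # DistilBERT-style layer selection: teacher layers [0, 2, 4, 7, 9, 11] are copied into
    # consecutive student layers, giving a 6-layer student from a 12-layer teacher.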
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
lowercase__ : Optional[Any] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
]
lowercase__ : str = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
]
lowercase__ : Optional[int] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
]
lowercase__ : Dict = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
]
lowercase__ : List[Any] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
]
lowercase__ : Dict = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
]
lowercase__ : List[str] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
]
lowercase__ : List[Any] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
]
std_idx += 1
lowercase__ : int = state_dict['''cls.predictions.decoder.weight''']
lowercase__ : List[Any] = state_dict['''cls.predictions.bias''']
if args.vocab_transform:
for w in ["weight", "bias"]:
lowercase__ : Optional[Any] = state_dict[f"""cls.predictions.transform.dense.{w}"""]
lowercase__ : Union[str, Any] = state_dict[f"""cls.predictions.transform.LayerNorm.{w}"""]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint) | 8 |
'''simple docstring'''
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=[30, 30] , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=10 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=None , _UpperCAmelCase=8 , _UpperCAmelCase=10 , ):
'''simple docstring'''
__A : Union[str, Any] = parent
__A : Tuple = batch_size
__A : List[str] = image_size
__A : Dict = patch_size
__A : Optional[Any] = num_channels
__A : Tuple = is_training
__A : Dict = use_labels
__A : List[Any] = hidden_size
__A : Tuple = num_hidden_layers
__A : int = num_attention_heads
__A : Optional[int] = intermediate_size
__A : Tuple = hidden_act
__A : Any = hidden_dropout_prob
__A : Optional[Any] = attention_probs_dropout_prob
__A : List[Any] = type_sequence_label_size
__A : List[Any] = initializer_range
__A : Optional[int] = num_labels
__A : List[Any] = scope
__A : Any = n_targets
__A : Union[str, Any] = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
__A : List[str] = (image_size[1] // patch_size) * (image_size[0] // patch_size)
__A : int = num_patches + 1 + self.num_detection_tokens
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])
__A : Tuple = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
__A : List[Any] = []
for i in range(self.batch_size):
__A : Optional[int] = {}
__A : Union[str, Any] = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=_UpperCAmelCase)
__A : str = torch.rand(self.n_targets , 4 , device=_UpperCAmelCase)
labels.append(_UpperCAmelCase)
__A : Any = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Any = YolosModel(config=_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
__A : Dict = model(_UpperCAmelCase)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Any = YolosForObjectDetection(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
__A : str = model(pixel_values=_UpperCAmelCase)
__A : List[str] = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4))
__A : Union[str, Any] = model(pixel_values=_UpperCAmelCase , labels=_UpperCAmelCase)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4))
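        # YOLOS scores num_labels + 1 classes per detection token (the extra slot is the
        # "no object" class) and regresses 4 normalized box coordinates for each token.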
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = self.prepare_config_and_inputs()
__A ,__A ,__A : Tuple = config_and_inputs
__A : Tuple = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE (a__ , a__ , unittest.TestCase ):
lowerCAmelCase = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
lowerCAmelCase = (
{'''feature-extraction''': YolosModel, '''object-detection''': YolosForObjectDetection} if is_torch_available() else {}
)
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
def _prepare_for_class( self , inputs_dict , model_class , return_labels=False):
'''simple docstring'''
inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels)
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
labels = []
for i in range(self.model_tester.batch_size):
target = {}
target['class_labels'] = torch.ones(
size=(self.model_tester.n_targets,) , device=torch_device , dtype=torch.long)
target['boxes'] = torch.ones(
self.model_tester.n_targets , 4 , device=torch_device , dtype=torch.float)
labels.append(target)
inputs_dict['labels'] = labels
return inputs_dict
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = YolosModelTester(self)
__A : Dict = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x , nn.Linear))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ['pixel_values']
self.assertListEqual(arg_names[:1] , expected_arg_names)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
__A : Optional[int] = True
# in YOLOS, the seq_len is different
__A : Dict = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
__A : Dict = True
__A : Dict = False
__A : Union[str, Any] = True
__A : Tuple = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : Any = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Union[str, Any] = outputs.attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__A : List[Any] = True
__A : List[str] = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : List[Any] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Optional[Any] = outputs.attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
__A : str = len(_UpperCAmelCase)
# Check attention is always last and order is fine
__A : Dict = True
__A : Dict = True
__A : Dict = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : List[str] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Union[str, Any] = 1
self.assertEqual(out_len + added_hidden_states , len(_UpperCAmelCase))
__A : Optional[Any] = outputs.attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
__A : Tuple = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : List[str] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Optional[Any] = outputs.hidden_states
__A : List[str] = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase)
# YOLOS has a different seq_length
__A : Dict = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : List[str] = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__A : Optional[int] = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*_UpperCAmelCase)
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A : List[Any] = YolosModel.from_pretrained(_UpperCAmelCase)
self.assertIsNotNone(_UpperCAmelCase)
def prepare_img ( ):
__A : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('hustvl/yolos-small') if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = YolosForObjectDetection.from_pretrained('hustvl/yolos-small').to(_UpperCAmelCase)
__A : Any = self.default_image_processor
__A : str = prepare_img()
__A : int = image_processor(images=_UpperCAmelCase , return_tensors='pt').to(_UpperCAmelCase)
# forward pass
with torch.no_grad():
__A : str = model(inputs.pixel_values)
# verify outputs
__A : Tuple = torch.Size((1, 100, 92))
self.assertEqual(outputs.logits.shape , _UpperCAmelCase)
__A : Dict = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] , device=_UpperCAmelCase , )
__A : int = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] , device=_UpperCAmelCase)
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , _UpperCAmelCase , atol=1e-4))
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , _UpperCAmelCase , atol=1e-4))
# verify postprocessing
__A : List[str] = image_processor.post_process_object_detection(
_UpperCAmelCase , threshold=0.3 , target_sizes=[image.size[::-1]])[0]
__A : Optional[int] = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(_UpperCAmelCase)
__A : Union[str, Any] = [75, 75, 17, 63, 17]
__A : Any = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(_UpperCAmelCase)
self.assertEqual(len(results['scores']) , 5)
self.assertTrue(torch.allclose(results['scores'] , _UpperCAmelCase , atol=1e-4))
self.assertSequenceEqual(results['labels'].tolist() , _UpperCAmelCase)
self.assertTrue(torch.allclose(results['boxes'][0, :] , _UpperCAmelCase)) | 8 | 1 |
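# A minimal, hedged sketch of the inference flow the integration test above
# exercises. The model name and API calls are taken from the test itself;
# the image path is an illustrative assumption.
import torch
from PIL import Image
from transformers import AutoImageProcessor, YolosForObjectDetection

def detect(image_path: str, threshold: float = 0.3):
    image = Image.open(image_path)
    processor = AutoImageProcessor.from_pretrained('hustvl/yolos-small')
    model = YolosForObjectDetection.from_pretrained('hustvl/yolos-small')
    inputs = processor(images=image, return_tensors='pt')
    with torch.no_grad():
        outputs = model(inputs.pixel_values)
    # Rescale normalized boxes back to the original (height, width) image size.
    return processor.post_process_object_detection(
        outputs, threshold=threshold, target_sizes=[image.size[::-1]])[0]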
'''simple docstring'''
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card ( model_card_dir , src_lang , tgt_lang , model_name ) -> None:
texts = {
'en': 'Machine learning is great, isn\'t it?',
'ru': 'Машинное обучение - это здорово, не так ли?',
'de': 'Maschinelles Lernen ist großartig, nicht wahr?',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
scores = {
'wmt16-en-de-dist-12-1': [28.3, 27.52],
'wmt16-en-de-dist-6-1': [27.4, 27.11],
'wmt16-en-de-12-1': [26.9, 25.75],
}
pair = f'{src_lang}-{tgt_lang}'
__A : Any = f'\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "allenai/{model_name}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n'
model_card_dir.mkdir(parents=True , exist_ok=True )
path = os.path.join(model_card_dir , 'README.md' )
print(f'Generating {path}' )
with open(path , 'w' , encoding='utf-8' ) as f:
f.write(__snake_case )
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / '''model_cards'''
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
model_card_dir = model_cards_dir / '''allenai''' / model_name
write_model_card(model_card_dir, src_lang='''en''', tgt_lang='''de''', model_name=model_name) | 8 |
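# The loop above writes one card per model to
# <repo_root>/model_cards/allenai/<model_name>/README.md, using the BLEU
# table and usage snippet embedded in the template string.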
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
CamembertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''camembert-base''': 5_12,
}
SPIECE_UNDERLINE = '''▁'''
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = ['''input_ids''', '''attention_mask''']
lowerCAmelCase = CamembertTokenizer
def __init__( self , vocab_file=None , tokenizer_file=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"] , **kwargs , ):
'''simple docstring'''
# Mask token behaves like a normal word, i.e. includes the space before it
mask_token = AddedToken(mask_token , lstrip=True , rstrip=False) if isinstance(mask_token , str) else mask_token
super().__init__(
vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , **kwargs , )
self.vocab_file = vocab_file
self.can_save_slow_tokenizer = False if not self.vocab_file else True
def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None):
'''simple docstring'''
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + sep + token_ids_1 + sep
def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None):
'''simple docstring'''
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
def save_vocabulary( self , save_directory , filename_prefix = None):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.')
if not os.path.isdir(save_directory):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
out_vocab_file = os.path.join(
save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
copyfile(self.vocab_file , out_vocab_file)
return (out_vocab_file,) | 8 | 1 |
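# Hedged illustration of the special-token layout built above (token IDs are
# hypothetical): a single sequence becomes <s> A </s>, a pair becomes
# <s> A </s></s> B </s>, and the token type ids are all zeros, RoBERTa-style.
# tok.build_inputs_with_special_tokens([10, 11], [12])
#   -> [cls_id, 10, 11, sep_id, sep_id, 12, sep_id]
# tok.create_token_type_ids_from_sequences([10, 11], [12])
#   -> [0, 0, 0, 0, 0, 0, 0]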
'''simple docstring'''
import os
def _lowerCAmelCase ( __snake_case : str = "matrix.txt" ) -> int:
with open(os.path.join(os.path.dirname(__snake_case ) , __snake_case ) ) as in_file:
__A : int = in_file.read()
__A : List[str] = [[int(__snake_case ) for cell in row.split(',' )] for row in data.strip().splitlines()]
__A : Tuple = [[0 for cell in row] for row in grid]
__A : Any = len(grid[0] )
__A : Any = [[0 for i in range(__snake_case )] for j in range(__snake_case )]
__A : List[str] = grid[0][0]
for i in range(1 , __snake_case ):
__A : Any = grid[0][i] + dp[0][i - 1]
for i in range(1 , __snake_case ):
__A : Optional[int] = grid[i][0] + dp[i - 1][0]
for i in range(1 , __snake_case ):
for j in range(1 , __snake_case ):
__A : int = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] )
return dp[-1][-1]
if __name__ == "__main__":
print(f"""{solution() = }""") | 8 |
'''simple docstring'''
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
RANDOM_BERT = '''hf-internal-testing/tiny-random-bert'''
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, '''models--hf-internal-testing--tiny-random-bert''')
FULL_COMMIT_HASH = '''9b8c223d42b2188cb49d29af482996f9d0f3e5a6'''
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase)
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(_UpperCAmelCase))
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(_UpperCAmelCase , _UpperCAmelCase)))
with open(os.path.join(_UpperCAmelCase , 'refs' , 'main')) as f:
__A : Any = f.read()
self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , 'snapshots' , _UpperCAmelCase , _UpperCAmelCase))
self.assertTrue(os.path.isfile(_UpperCAmelCase))
# File is cached at the same place the second time.
__A : Tuple = cached_file(_UpperCAmelCase , _UpperCAmelCase)
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase)
# Using a specific revision to test the full commit hash.
__A : List[Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='9b8c223')
self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , 'snapshots' , _UpperCAmelCase , _UpperCAmelCase))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid model identifier'):
__A : Dict = cached_file('tiny-random-bert' , _UpperCAmelCase)
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid git identifier'):
__A : Optional[int] = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='aaaa')
with self.assertRaisesRegex(_UpperCAmelCase , 'does not appear to have a file named'):
__A : int = cached_file(_UpperCAmelCase , 'conf')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with self.assertRaisesRegex(_UpperCAmelCase , 'does not appear to have a file named'):
__A : Any = cached_file(_UpperCAmelCase , 'conf')
with open(os.path.join(_UpperCAmelCase , 'refs' , 'main')) as f:
__A : Dict = f.read()
self.assertTrue(os.path.isfile(os.path.join(_UpperCAmelCase , '.no_exist' , _UpperCAmelCase , 'conf')))
__A : List[Any] = cached_file(_UpperCAmelCase , 'conf' , _raise_exceptions_for_missing_entries=_UpperCAmelCase)
self.assertIsNone(_UpperCAmelCase)
__A : str = cached_file(_UpperCAmelCase , 'conf' , local_files_only=_UpperCAmelCase , _raise_exceptions_for_missing_entries=_UpperCAmelCase)
self.assertIsNone(_UpperCAmelCase)
__A : List[str] = mock.Mock()
__A : Dict = 500
__A : List[str] = {}
__A : List[Any] = HTTPError
__A : Optional[Any] = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=_UpperCAmelCase) as mock_head:
__A : Dict = cached_file(_UpperCAmelCase , 'conf' , _raise_exceptions_for_connection_errors=_UpperCAmelCase)
self.assertIsNone(_UpperCAmelCase)
# This check we did call the fake head request
mock_head.assert_called()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.assertTrue(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase))
self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase))
self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , _UpperCAmelCase))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.assertIsNone(get_file_from_repo('bert-base-cased' , 'ahah.txt'))
# The function raises if the repository does not exist.
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid model identifier'):
get_file_from_repo('bert-base-case' , _UpperCAmelCase)
# The function raises if the revision does not exist.
with self.assertRaisesRegex(_UpperCAmelCase , 'is not a valid git identifier'):
get_file_from_repo('bert-base-cased' , _UpperCAmelCase , revision='ahaha')
__A : List[str] = get_file_from_repo('bert-base-cased' , _UpperCAmelCase)
# The name is the cached name which is not very easy to test, so instead we load the content.
__A : List[str] = json.loads(open(_UpperCAmelCase , 'r').read())
self.assertEqual(config['hidden_size'] , 768)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
__A : Tuple = Path(_UpperCAmelCase) / 'a.txt'
filename.touch()
self.assertEqual(get_file_from_repo(_UpperCAmelCase , 'a.txt') , str(_UpperCAmelCase))
self.assertIsNone(get_file_from_repo(_UpperCAmelCase , 'b.txt')) | 8 | 1 |
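# Hedged sketch of the pattern these tests pin down: `cached_file` can return
# None instead of raising when an entry is missing or the network errors out,
# which callers use for optional files.
# config_path = cached_file(repo_id, 'conf', _raise_exceptions_for_missing_entries=False)
# if config_path is None:
#     ...fall back to defaults...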
'''simple docstring'''
def euclidean_gcd ( a : int , b : int ) -> int:
while b:
a , b = b , a % b
return a
def euclidean_gcd_recursive ( a : int , b : int ) -> int:
return a if b == 0 else euclidean_gcd_recursive(b , a % b )
def main ( ) -> None:
print(f'euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}' )
print(f'euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}' )
print(f'euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}' )
print(f'euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}' )
print(f'euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}' )
print(f'euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}' )
print(f'euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}' )
print(f'euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}' )
print(f'euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}' )
print(f'euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}' )
if __name__ == "__main__":
main() | 8 |
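# Hedged cross-check of the two implementations against math.gcd on random
# non-negative inputs (the input ranges are arbitrary assumptions).
import math
import random

for _ in range(100):
    a, b = random.randint(0, 10_000), random.randint(0, 10_000)
    assert euclidean_gcd(a, b) == euclidean_gcd_recursive(a, b) == math.gcd(a, b)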
'''simple docstring'''
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer
def save_randomly_initialized_version ( config_name : str , save_dir : str , **config_kwargs ):
config = AutoConfig.from_pretrained(config_name , **config_kwargs )
model = AutoModelForSeq2SeqLM.from_config(config )
model.save_pretrained(save_dir )
AutoTokenizer.from_pretrained(config_name ).save_pretrained(save_dir )
return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version) | 8 | 1 |
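# Hedged usage sketch (script name, model name, save path and config override
# are illustrative): fire exposes the function as a CLI, so either
#   python save_randomly_initialized_model.py t5-small /tmp/t5-random --d_model=64
# or, from Python,
#   save_randomly_initialized_version('t5-small', '/tmp/t5-random', d_model=64)
# produces an untrained model plus tokenizer under the save directory.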
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = tempfile.mkdtemp()
# fmt: off
__A : List[Any] = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest']
# fmt: on
__A : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
__A : List[Any] = {
'do_resize': True,
'size': {'height': 18, 'width': 18},
'do_normalize': True,
'image_mean': [0.5, 0.5, 0.5],
'image_std': [0.5, 0.5, 0.5],
}
__A : int = os.path.join(self.tmpdirname , _UpperCAmelCase)
with open(self.image_processor_file , 'w' , encoding='utf-8') as fp:
json.dump(_UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , **_UpperCAmelCase):
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , **_UpperCAmelCase):
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8)]
__A : List[str] = [Image.fromarray(np.moveaxis(x , 0 , -1)) for x in image_inputs]
return image_inputs
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[Any] = self.get_tokenizer()
__A : List[Any] = self.get_image_processor()
__A : Dict = VisionTextDualEncoderProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase)
processor.save_pretrained(self.tmpdirname)
__A : Union[str, Any] = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab())
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast))
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor.image_processor , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
__A : Optional[Any] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
__A : Optional[Any] = self.get_image_processor(do_normalize=_UpperCAmelCase , padding_value=1.0)
__A : Tuple = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_UpperCAmelCase , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast))
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = self.get_image_processor()
__A : Any = self.get_tokenizer()
__A : Optional[int] = VisionTextDualEncoderProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase)
__A : List[Any] = self.prepare_image_inputs()
__A : str = image_processor(_UpperCAmelCase , return_tensors='np')
__A : Tuple = processor(images=_UpperCAmelCase , return_tensors='np')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = self.get_image_processor()
__A : Optional[int] = self.get_tokenizer()
__A : Optional[int] = VisionTextDualEncoderProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase)
__A : Union[str, Any] = 'lower newer'
__A : Optional[Any] = processor(text=_UpperCAmelCase)
__A : List[Any] = tokenizer(_UpperCAmelCase)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = self.get_image_processor()
__A : Any = self.get_tokenizer()
__A : Any = VisionTextDualEncoderProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase)
__A : Union[str, Any] = 'lower newer'
__A : Dict = self.prepare_image_inputs()
__A : Dict = processor(text=_UpperCAmelCase , images=_UpperCAmelCase)
self.assertListEqual(list(inputs.keys()) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'])
# test if it raises when no input is passed
with self.assertRaises(_UpperCAmelCase):
processor()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = self.get_image_processor()
__A : Optional[int] = self.get_tokenizer()
__A : List[str] = VisionTextDualEncoderProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase)
__A : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__A : List[str] = processor.batch_decode(_UpperCAmelCase)
__A : Union[str, Any] = tokenizer.batch_decode(_UpperCAmelCase)
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = self.get_image_processor()
__A : Union[str, Any] = self.get_tokenizer()
__A : str = VisionTextDualEncoderProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase)
__A : Optional[Any] = 'lower newer'
__A : str = self.prepare_image_inputs()
__A : str = processor(text=_UpperCAmelCase , images=_UpperCAmelCase)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names) | 8 |
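# What these tests establish, summarized: the processor is a thin wrapper that
# forwards `text=` to the tokenizer and `images=` to the image processor, so a
# combined call returns the union of both outputs, e.g.
# ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'],
# and `batch_decode` simply delegates to the tokenizer.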
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/tapas-base-finetuned-sqa''': (
'''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wtq''': (
'''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wikisql-supervised''': (
'''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-tabfact''': (
'''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = '''tapas'''
def __init__( self , _UpperCAmelCase=3_0522 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=1024 , _UpperCAmelCase=[3, 256, 256, 2, 256, 256, 10] , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=0 , _UpperCAmelCase=10.0 , _UpperCAmelCase=0 , _UpperCAmelCase=1.0 , _UpperCAmelCase=None , _UpperCAmelCase=1.0 , _UpperCAmelCase=False , _UpperCAmelCase=None , _UpperCAmelCase=1.0 , _UpperCAmelCase=1.0 , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase="ratio" , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=64 , _UpperCAmelCase=32 , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase , ):
'''simple docstring'''
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase)
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
__A : Dict = vocab_size
__A : Tuple = hidden_size
__A : Any = num_hidden_layers
__A : int = num_attention_heads
__A : Tuple = hidden_act
__A : Tuple = intermediate_size
__A : List[Any] = hidden_dropout_prob
__A : int = attention_probs_dropout_prob
__A : List[str] = max_position_embeddings
__A : Optional[int] = type_vocab_sizes
__A : str = initializer_range
__A : List[str] = layer_norm_eps
# Fine-tuning task hyperparameters
__A : List[str] = positive_label_weight
__A : List[Any] = num_aggregation_labels
__A : Optional[Any] = aggregation_loss_weight
__A : Tuple = use_answer_as_supervision
__A : List[str] = answer_loss_importance
__A : Any = use_normalized_answer_loss
__A : Any = huber_loss_delta
__A : Union[str, Any] = temperature
__A : Tuple = aggregation_temperature
__A : Optional[Any] = use_gumbel_for_cells
__A : List[str] = use_gumbel_for_aggregation
__A : Tuple = average_approximation_function
__A : List[str] = cell_selection_preference
__A : Dict = answer_loss_cutoff
__A : Union[str, Any] = max_num_rows
__A : Optional[Any] = max_num_columns
__A : int = average_logits_per_cell
__A : Optional[Any] = select_one_column
__A : int = allow_empty_column_selection
__A : List[Any] = init_cell_selection_weights_to_zero
__A : int = reset_position_index_per_cell
__A : Union[str, Any] = disable_per_token_loss
# Aggregation hyperparameters
__A : Optional[Any] = aggregation_labels
__A : List[str] = no_aggregation_label_index
if isinstance(self.aggregation_labels , _UpperCAmelCase):
__A : Optional[Any] = {int(k): v for k, v in aggregation_labels.items()} | 8 | 1 |
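# Hedged example of the normalization above (label names are illustrative):
# JSON round-trips turn integer dict keys into strings, so a config loaded
# with aggregation_labels={'0': 'NONE', '1': 'SUM'} is coerced back to
# {0: 'NONE', 1: 'SUM'}.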
'''simple docstring'''
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer
def save_randomly_initialized_version ( config_name : str , save_dir : str , **config_kwargs ):
config = AutoConfig.from_pretrained(config_name , **config_kwargs )
model = AutoModelForSeq2SeqLM.from_config(config )
model.save_pretrained(save_dir )
AutoTokenizer.from_pretrained(config_name ).save_pretrained(save_dir )
return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version) | 8 |
'''simple docstring'''
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class SCREAMING_SNAKE_CASE :
def __init__( self , short_edge_length , max_size=sys.maxsize):
'''simple docstring'''
self.interp_method = 'bilinear'
self.max_size = max_size
self.short_edge_length = short_edge_length
def __call__( self , imgs):
'''simple docstring'''
img_augs = []
for img in imgs:
h , w = img.shape[:2]
# later: provide list and randomly choose index for resize
size = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1)
if size == 0:
return img
scale = size * 1.0 / min(h , w)
if h < w:
newh , neww = size, scale * w
else:
newh , neww = scale * h, size
if max(newh , neww) > self.max_size:
scale = self.max_size * 1.0 / max(newh , neww)
newh = newh * scale
neww = neww * scale
neww = int(neww + 0.5)
newh = int(newh + 0.5)
if img.dtype == np.uint8:
pil_image = Image.fromarray(img)
pil_image = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR)
img = np.asarray(pil_image)
else:
img = img.permute(2 , 0 , 1).unsqueeze(0) # 3, 0, 1) # hw(c) -> nchw
img = nn.functional.interpolate(
img , (newh, neww) , mode=self.interp_method , align_corners=False).squeeze(0)
img_augs.append(img)
return img_augs
class SCREAMING_SNAKE_CASE :
def __init__( self , cfg):
'''simple docstring'''
self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST)
self.input_format = cfg.INPUT.FORMAT
self.size_divisibility = cfg.SIZE_DIVISIBILITY
self.pad_value = cfg.PAD_VALUE
self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
self.device = cfg.MODEL.DEVICE
self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1)
self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1)
self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std
def pad( self , images):
'''simple docstring'''
max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
image_sizes = [im.shape[-2:] for im in images]
images = [
nn.functional.pad(
im , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(image_sizes , images)
]
return torch.stack(images), torch.tensor(image_sizes)
def __call__( self , images , single_image=False):
'''simple docstring'''
with torch.no_grad():
if not isinstance(images , list):
images = [images]
if single_image:
assert len(images) == 1
for i in range(len(images)):
if isinstance(images[i] , torch.Tensor):
images.insert(i , images.pop(i).to(self.device).float())
elif not isinstance(images[i] , torch.Tensor):
images.insert(
i , torch.as_tensor(img_tensorize(images.pop(i) , input_format=self.input_format))
.to(self.device)
.float() , )
# resize smallest edge
raw_sizes = torch.tensor([im.shape[:2] for im in images])
images = self.aug(images)
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
images = [self.normalizer(x) for x in images]
# now pad them to do the following operations
images , sizes = self.pad(images)
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
scales_yx = torch.true_divide(raw_sizes , sizes)
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def _scale_box ( boxes , scale_yx ):
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def _clip_box ( tensor , box_size : Tuple[int, int] ):
assert torch.isfinite(tensor ).all(), "Box tensor contains infinite or NaN!"
h , w = box_size
tensor[:, 0].clamp_(min=0 , max=w )
tensor[:, 1].clamp_(min=0 , max=h )
tensor[:, 2].clamp_(min=0 , max=w )
tensor[:, 3].clamp_(min=0 , max=h ) | 8 | 1 |
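# Hedged round-trip sketch for the two box helpers above (numbers are
# illustrative, and torch is already imported in this module): boxes predicted
# on a resized image are mapped back with the per-image (y, x) scale, then
# clipped to the original size.
boxes = torch.tensor([[10.0, 20.0, 700.0, 900.0]])  # x1, y1, x2, y2
scale_yx = torch.tensor([[0.5, 0.5]])               # hypothetical resize factor
_scale_box(boxes, scale_yx)                         # in-place rescale
_clip_box(boxes, (400, 300))                        # clamp to a 400x300 (h, w) image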
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''allenai/led-base-16384''': 1_63_84,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode ( ):
bs = (
list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
)
cs = bs[:]
n = 0
for b in range(2**8 ):
if b not in bs:
bs.append(b )
cs.append(2**8 + n )
n += 1
cs = [chr(n ) for n in cs]
return dict(zip(bs , cs ) )
def get_pairs ( word ):
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
prev_char = char
return pairs
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = ['''input_ids''', '''attention_mask''']
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase="replace" , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<s>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="<mask>" , _UpperCAmelCase=False , **_UpperCAmelCase , ):
'''simple docstring'''
__A : Optional[Any] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase) if isinstance(_UpperCAmelCase , _UpperCAmelCase) else bos_token
__A : List[Any] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase) if isinstance(_UpperCAmelCase , _UpperCAmelCase) else eos_token
__A : List[Any] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase) if isinstance(_UpperCAmelCase , _UpperCAmelCase) else sep_token
__A : List[Any] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase) if isinstance(_UpperCAmelCase , _UpperCAmelCase) else cls_token
__A : Optional[Any] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase) if isinstance(_UpperCAmelCase , _UpperCAmelCase) else unk_token
__A : Optional[int] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase) if isinstance(_UpperCAmelCase , _UpperCAmelCase) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__A : Tuple = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase) if isinstance(_UpperCAmelCase , _UpperCAmelCase) else mask_token
super().__init__(
errors=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , **_UpperCAmelCase , )
with open(_UpperCAmelCase , encoding='utf-8') as vocab_handle:
__A : List[Any] = json.load(_UpperCAmelCase)
__A : Optional[Any] = {v: k for k, v in self.encoder.items()}
__A : List[str] = errors # how to handle errors in decoding
__A : Dict = bytes_to_unicode()
__A : List[Any] = {v: k for k, v in self.byte_encoder.items()}
with open(_UpperCAmelCase , encoding='utf-8') as merges_handle:
__A : Any = merges_handle.read().split('\n')[1:-1]
__A : List[str] = [tuple(merge.split()) for merge in bpe_merges]
__A : Optional[int] = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase))))
__A : Optional[Any] = {}
__A : Optional[Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__A : Optional[int] = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+')
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def vocab_size( self):
'''simple docstring'''
return len(self.encoder)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder)
def bpe( self , token):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
word = tuple(token)
pairs = get_pairs(word)
if not pairs:
return token
while True:
bigram = min(pairs , key=lambda pair: self.bpe_ranks.get(pair , float('inf')))
if bigram not in self.bpe_ranks:
break
first , second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first , i)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
i = j
if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A : int = []
for token in re.findall(self.pat , _UpperCAmelCase):
__A : Dict = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8')) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_UpperCAmelCase).split(' '))
return bpe_tokens
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
return self.encoder.get(_UpperCAmelCase , self.encoder.get(self.unk_token))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
return self.decoder.get(_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A : List[Any] = ''.join(_UpperCAmelCase)
__A : Any = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8' , errors=self.errors)
return text
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None):
'''simple docstring'''
if not os.path.isdir(_UpperCAmelCase):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
__A : str = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
__A : Any = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
with open(_UpperCAmelCase , 'w' , encoding='utf-8') as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_UpperCAmelCase , ensure_ascii=_UpperCAmelCase) + '\n')
__A : List[str] = 0
with open(_UpperCAmelCase , 'w' , encoding='utf-8') as writer:
writer.write('#version: 0.2\n')
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
' Please check that the tokenizer is not corrupted!')
__A : Tuple = token_index
writer.write(' '.join(_UpperCAmelCase) + '\n')
index += 1
return vocab_file, merge_file
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__A : str = [self.cls_token_id]
__A : int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = False):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase)
if token_ids_a is None:
return [1] + ([0] * len(_UpperCAmelCase)) + [1]
return [1] + ([0] * len(_UpperCAmelCase)) + [1, 1] + ([0] * len(_UpperCAmelCase)) + [1]
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None):
'''simple docstring'''
__A : int = [self.sep_token_id]
__A : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase=False , **_UpperCAmelCase):
'''simple docstring'''
add_prefix_space = kwargs.pop('add_prefix_space' , self.add_prefix_space)
if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
text = ' ' + text
return (text, kwargs)
def _pad( self , encoded_inputs , max_length = None , padding_strategy = PaddingStrategy.DO_NOT_PAD , pad_to_multiple_of = None , return_attention_mask = None , ):
'''simple docstring'''
encoded_inputs = super()._pad(
encoded_inputs=encoded_inputs , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )
# Load from model defaults
if return_attention_mask is None:
return_attention_mask = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
required_input = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
needs_to_be_padded = len(encoded_inputs['global_attention_mask']) != len(required_input)
if needs_to_be_padded:
difference = len(required_input) - len(encoded_inputs['global_attention_mask'])
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
encoded_inputs['global_attention_mask'] = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
encoded_inputs['global_attention_mask'] = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side))
return encoded_inputs | 8 |
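# Hedged illustration of the `_pad` override above: when padding on the right,
# `global_attention_mask` is extended with -1 (meaning "local attention"), so
# padded positions are never marked global. With hypothetical values:
#   before: input_ids=[5, 6, 7],       global_attention_mask=[1, 0, 0]
#   after padding to length 5:
#           input_ids=[5, 6, 7, p, p], global_attention_mask=[1, 0, 0, -1, -1]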
'''simple docstring'''
def compute_ap ( l ): # noqa: E741
n = len(l )
out_edge_count = 0
low = [0] * n
visited = [False] * n
is_art = [False] * n
def dfs(root , at , parent , out_edge_count ):
if parent == root:
out_edge_count += 1
visited[at] = True
low[at] = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
out_edge_count = dfs(root , to , at , out_edge_count )
low[at] = min(low[at] , low[to] )
# AP found via bridge
if at < low[to]:
is_art[at] = True
# AP found via cycle
if at == low[to]:
is_art[at] = True
else:
low[at] = min(low[at] , to )
return out_edge_count
for i in range(n ):
if not visited[i]:
out_edge_count = 0
out_edge_count = dfs(i , i , -1 , out_edge_count )
is_art[i] = out_edge_count > 1
for x in range(len(is_art ) ):
if is_art[x] is True:
print(x )
# Adjacency list of graph
lowercase__ : Tuple = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data) | 8 | 1 |
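# For the adjacency list above: removing vertex 2 separates {0, 1} from the
# rest of the graph, removing 3 strands vertex 4, and removing 5 cuts off the
# 6-7-8 cycle, so compute_ap(data) is expected to print 2, 3 and 5.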
'''simple docstring'''
import argparse
import os
import re
PATH_TO_TRANSFORMERS = '''src/diffusers'''
# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r'''^(\s*)\S''')
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'''^\s*"([^"]+)":''')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'''^\s*_import_structure\["([^"]+)"\]''')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'''^\s*"([^"]+)",\s*$''')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r'''\[([^\]]+)\]''')
def get_indent ( code : str ) -> str:
search = _re_indent.search(code )
return "" if search is None else search.groups()[0]
def _lowerCAmelCase ( __snake_case : Optional[Any] , __snake_case : str="" , __snake_case : Any=None , __snake_case : List[Any]=None ) -> Optional[int]:
__A : Tuple = 0
__A : Optional[int] = code.split('\n' )
if start_prompt is not None:
while not lines[index].startswith(__snake_case ):
index += 1
__A : Optional[int] = ['\n'.join(lines[:index] )]
else:
__A : Any = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
__A : Tuple = [lines[index]]
index += 1
while index < len(__snake_case ) and (end_prompt is None or not lines[index].startswith(__snake_case )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(__snake_case ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
current_block.append(lines[index] )
blocks.append('\n'.join(__snake_case ) )
if index < len(__snake_case ) - 1:
__A : Union[str, Any] = [lines[index + 1]]
index += 1
else:
__A : Union[str, Any] = []
else:
blocks.append('\n'.join(__snake_case ) )
__A : Optional[Any] = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(__snake_case ) > 0:
blocks.append('\n'.join(__snake_case ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(__snake_case ):
blocks.append('\n'.join(lines[index:] ) )
return blocks
def ignore_underscore ( key ):
def _inner(x ):
return key(x ).lower().replace('_' , '' )
return _inner
def sort_objects ( objects , key=None ):
# If no key is provided, we use a noop.
def noop(x ):
return x
if key is None:
key = noop
# Constants are all uppercase, they go first.
constants = [obj for obj in objects if key(obj ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
classes = [obj for obj in objects if key(obj )[0].isupper() and not key(obj ).isupper()]
# Functions begin with a lowercase, they go last.
functions = [obj for obj in objects if not key(obj )[0].isupper()]
key1 = ignore_underscore(key )
return sorted(constants , key=key1 ) + sorted(classes , key=key1 ) + sorted(functions , key=key1 )
def sort_objects_in_import ( import_statement : str ) -> str:
# This inner function sort imports between [ ].
def _replace(match ):
imports = match.groups()[0]
if "," not in imports:
return f'[{imports}]'
keys = [part.strip().replace('"' , '' ) for part in imports.split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
keys = keys[:-1]
return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys )] ) + "]"
lines = import_statement.split('\n' )
if len(lines ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
idx = 2 if lines[1].strip() == '[' else 1
keys = [(i, _re_strip_line.search(line ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
sorted_indices = sort_objects(keys , key=lambda x: x[1] )
sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(lines ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
lines[1] = _re_bracket_content.sub(_replace , lines[1] )
else:
keys = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
keys = keys[:-1]
lines[1] = get_indent(lines[1] ) + ', '.join([f'"{k}"' for k in sort_objects(keys )] )
return "\n".join(lines )
else:
# Finally we have to deal with imports fitting on one line
import_statement = _re_bracket_content.sub(_replace , import_statement )
return import_statement
def sort_imports ( file , check_only=True ):
with open(file , 'r' ) as f:
code = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
main_blocks = split_code_in_indented_blocks(
code , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(main_blocks ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
block = main_blocks[block_idx]
block_lines = block.split('\n' )
# Get to the start of the imports.
line_idx = 0
while line_idx < len(block_lines ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
line_idx = len(block_lines )
else:
line_idx += 1
if line_idx >= len(block_lines ):
continue
# Ignore beginning and last line: they don't contain anything.
internal_block_code = '\n'.join(block_lines[line_idx:-1] )
indent = get_indent(block_lines[1] )
# Slit the internal block into blocks of indent level 1.
internal_blocks = split_code_in_indented_blocks(internal_block_code , indent_level=indent )
# We have two categories of import key: list or _import_structure[key].append/extend
pattern = _re_direct_key if '_import_structure' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
keys = [(pattern.search(b ).groups()[0] if pattern.search(b ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
keys_to_sort = [(i, key) for i, key in enumerate(keys ) if key is not None]
sorted_indices = [x[0] for x in sorted(keys_to_sort , key=lambda x: x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
count = 0
reordered_blocks = []
for i in range(len(internal_blocks ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
block = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(block )
count += 1
# And we put our main block back together with its first and last line.
main_blocks[block_idx] = '\n'.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(main_blocks ):
if check_only:
return True
else:
print(f'Overwriting {file}.' )
with open(file , 'w' ) as f:
f.write('\n'.join(main_blocks ) )
def _lowerCAmelCase ( __snake_case : int=True ) -> Optional[Any]:
__A : Tuple = []
for root, _, files in os.walk(__snake_case ):
if "__init__.py" in files:
__A : List[Any] = sort_imports(os.path.join(__snake_case , '__init__.py' ) , check_only=__snake_case )
if result:
__A : Dict = [os.path.join(__snake_case , '__init__.py' )]
if len(__snake_case ) > 0:
raise ValueError(f'Would overwrite {len(__snake_case )} files, run `make style`.' )
if __name__ == "__main__":
lowercase__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
lowercase__ : Union[str, Any] = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only) | 8 |
'''simple docstring'''
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
lowercase__ : Union[str, Any] = logging.get_logger(__name__)
lowercase__ : int = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn.grep_linear''': '''encoder.layers.*.attention.gru_rel_pos_linear''',
'''self_attn.relative_attention_bias''': '''encoder.layers.*.attention.rel_attn_embed''',
'''self_attn.grep_a''': '''encoder.layers.*.attention.gru_rel_pos_const''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
lowercase__ : Dict = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
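# Copy `value` into the parameter reached by following the dot-separated `key` on the HF model.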
def _lowerCAmelCase ( __snake_case : int , __snake_case : Union[str, Any] , __snake_case : Tuple , __snake_case : Any , __snake_case : List[str] ) -> Union[str, Any]:
for attribute in key.split('.' ):
__A : int = getattr(__snake_case , __snake_case )
if weight_type is not None:
__A : Optional[int] = getattr(__snake_case , __snake_case ).shape
else:
__A : List[str] = hf_pointer.shape
assert hf_shape == value.shape, (
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}'
)
if weight_type == "weight":
__A : Tuple = value
elif weight_type == "weight_g":
__A : Union[str, Any] = value
elif weight_type == "weight_v":
__A : Optional[Any] = value
elif weight_type == "bias":
__A : Optional[int] = value
else:
__A : Optional[int] = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
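# Map every tensor in the fairseq state dict onto the HF WavLM model, collecting any unused weights.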
def _lowerCAmelCase ( __snake_case : Any , __snake_case : List[str] ) -> List[Any]:
__A : Optional[Any] = []
__A : Any = fairseq_model.state_dict()
__A : Union[str, Any] = hf_model.feature_extractor
for name, value in fairseq_dict.items():
__A : Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
__snake_case , __snake_case , __snake_case , __snake_case , hf_model.config.feat_extract_norm == 'group' , )
__A : Optional[Any] = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
__A : int = True
if "*" in mapped_key:
__A : Any = name.split(__snake_case )[0].split('.' )[-2]
__A : List[Any] = mapped_key.replace('*' , __snake_case )
if "weight_g" in name:
__A : Optional[Any] = 'weight_g'
elif "weight_v" in name:
__A : Union[str, Any] = 'weight_v'
elif "bias" in name and "relative_attention_bias" not in name:
__A : Optional[Any] = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__A : Tuple = 'weight'
else:
__A : Dict = None
set_recursively(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
continue
if not is_used:
unused_weights.append(__snake_case )
logger.warning(f'Unused weights: {unused_weights}' )
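# Load a single convolutional feature-extractor layer (conv weight/bias or layer norm) from its fairseq name.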
def _lowerCAmelCase ( __snake_case : List[str] , __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : Optional[int] ) -> int:
__A : int = full_name.split('conv_layers.' )[-1]
__A : List[str] = name.split('.' )
__A : Optional[int] = int(items[0] )
__A : str = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
__A : Optional[int] = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
__A : Union[str, Any] = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.'
)
__A : Dict = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'
)
__A : Any = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(__snake_case )
@torch.no_grad()
def _lowerCAmelCase ( __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : Tuple=None ) -> Any:
# load the pre-trained checkpoints
__A : List[str] = torch.load(__snake_case )
__A : Dict = WavLMConfigOrig(checkpoint['cfg'] )
__A : Optional[int] = WavLMOrig(__snake_case )
model.load_state_dict(checkpoint['model'] )
model.eval()
if config_path is not None:
__A : List[Any] = WavLMConfig.from_pretrained(__snake_case )
else:
__A : Dict = WavLMConfig()
__A : Optional[Any] = WavLMModel(__snake_case )
recursively_load_weights(__snake_case , __snake_case )
hf_wavlm.save_pretrained(__snake_case )
if __name__ == "__main__":
lowercase__ : Tuple = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
lowercase__ : Any = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path) | 8 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE (a__ , unittest.TestCase ):
lowerCAmelCase = OpenAIGPTTokenizer
lowerCAmelCase = OpenAIGPTTokenizerFast
lowerCAmelCase = True
lowerCAmelCase = False
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__A : Optional[Any] = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
__A : Dict = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase))))
__A : List[Any] = ['#version: 0.2', 'l o', 'lo w', 'e r</w>', '']
__A : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
__A : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file , 'w') as fp:
fp.write(json.dumps(_UpperCAmelCase))
with open(self.merges_file , 'w') as fp:
fp.write('\n'.join(_UpperCAmelCase))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
return "lower newer", "lower newer"
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = OpenAIGPTTokenizer(self.vocab_file , self.merges_file)
__A : int = 'lower'
__A : Dict = ['low', 'er</w>']
__A : Union[str, Any] = tokenizer.tokenize(_UpperCAmelCase)
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)
__A : Dict = tokens + ['<unk>']
__A : str = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase) , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase=15):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})'):
__A : int = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase)
# Simple input
__A : Tuple = 'This is a simple input'
__A : Dict = ['This is a simple input 1', 'This is a simple input 2']
__A : int = ('This is a simple input', 'This is a pair')
__A : List[Any] = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(_UpperCAmelCase , tokenizer_r.encode , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length')
# Simple input
self.assertRaises(_UpperCAmelCase , tokenizer_r.encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length')
# Simple input
self.assertRaises(
_UpperCAmelCase , tokenizer_r.batch_encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' , )
# Pair input
self.assertRaises(_UpperCAmelCase , tokenizer_r.encode , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length')
# Pair input
self.assertRaises(_UpperCAmelCase , tokenizer_r.encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length')
# Pair input
self.assertRaises(
_UpperCAmelCase , tokenizer_r.batch_encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
@require_ftfy
@require_spacy
@require_tokenizers
class SCREAMING_SNAKE_CASE (a__ ):
pass | 8 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = 42
class SCREAMING_SNAKE_CASE (a__ , a__ ):
@register_to_config
def __init__( self , _UpperCAmelCase = 6_5536 , _UpperCAmelCase = None , _UpperCAmelCase = 2 , _UpperCAmelCase = 2 , _UpperCAmelCase = 0 , _UpperCAmelCase = "fourier" , _UpperCAmelCase = True , _UpperCAmelCase = False , _UpperCAmelCase = 0.0 , _UpperCAmelCase = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , _UpperCAmelCase = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , _UpperCAmelCase = "UNetMidBlock1D" , _UpperCAmelCase = None , _UpperCAmelCase = (32, 32, 64) , _UpperCAmelCase = None , _UpperCAmelCase = 8 , _UpperCAmelCase = 1 , _UpperCAmelCase = False , ):
'''simple docstring'''
super().__init__()
__A : Dict = sample_size
# time
if time_embedding_type == "fourier":
__A : int = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=_UpperCAmelCase , log=_UpperCAmelCase , flip_sin_to_cos=_UpperCAmelCase)
__A : Any = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
__A : List[str] = Timesteps(
block_out_channels[0] , flip_sin_to_cos=_UpperCAmelCase , downscale_freq_shift=_UpperCAmelCase)
__A : List[str] = block_out_channels[0]
if use_timestep_embedding:
__A : Optional[Any] = block_out_channels[0] * 4
__A : Optional[int] = TimestepEmbedding(
in_channels=_UpperCAmelCase , time_embed_dim=_UpperCAmelCase , act_fn=_UpperCAmelCase , out_dim=block_out_channels[0] , )
__A : Dict = nn.ModuleList([])
__A : Dict = None
__A : Tuple = nn.ModuleList([])
__A : Tuple = None
# down
__A : Any = in_channels
for i, down_block_type in enumerate(_UpperCAmelCase):
__A : Tuple = output_channel
__A : Optional[Any] = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
__A : List[str] = i == len(_UpperCAmelCase) - 1
__A : int = get_down_block(
_UpperCAmelCase , num_layers=_UpperCAmelCase , in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(_UpperCAmelCase)
# mid
__A : str = get_mid_block(
_UpperCAmelCase , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=_UpperCAmelCase , add_downsample=_UpperCAmelCase , )
# up
__A : Optional[int] = list(reversed(_UpperCAmelCase))
__A : Optional[int] = reversed_block_out_channels[0]
if out_block_type is None:
__A : str = out_channels
else:
__A : List[Any] = block_out_channels[0]
for i, up_block_type in enumerate(_UpperCAmelCase):
__A : Optional[Any] = output_channel
__A : Optional[Any] = (
reversed_block_out_channels[i + 1] if i < len(_UpperCAmelCase) - 1 else final_upsample_channels
)
__A : Dict = i == len(_UpperCAmelCase) - 1
__A : str = get_up_block(
_UpperCAmelCase , num_layers=_UpperCAmelCase , in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(_UpperCAmelCase)
__A : Optional[int] = output_channel
# out
__A : str = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32)
__A : Optional[Any] = get_out_block(
out_block_type=_UpperCAmelCase , num_groups_out=_UpperCAmelCase , embed_dim=block_out_channels[0] , out_channels=_UpperCAmelCase , act_fn=_UpperCAmelCase , fc_dim=block_out_channels[-1] // 4 , )
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = True , ):
'''simple docstring'''
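        # 1. time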
__A : Any = timestep
if not torch.is_tensor(_UpperCAmelCase):
__A : Any = torch.tensor([timesteps] , dtype=torch.long , device=sample.device)
elif torch.is_tensor(_UpperCAmelCase) and len(timesteps.shape) == 0:
__A : Any = timesteps[None].to(sample.device)
__A : List[Any] = self.time_proj(_UpperCAmelCase)
if self.config.use_timestep_embedding:
__A : Dict = self.time_mlp(_UpperCAmelCase)
else:
__A : Dict = timestep_embed[..., None]
__A : Tuple = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
__A : List[Any] = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))
# 2. down
__A : int = ()
for downsample_block in self.down_blocks:
__A ,__A : int = downsample_block(hidden_states=_UpperCAmelCase , temb=_UpperCAmelCase)
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
__A : Optional[int] = self.mid_block(_UpperCAmelCase , _UpperCAmelCase)
# 4. up
for i, upsample_block in enumerate(self.up_blocks):
__A : Any = down_block_res_samples[-1:]
__A : Optional[int] = down_block_res_samples[:-1]
__A : Any = upsample_block(_UpperCAmelCase , res_hidden_states_tuple=_UpperCAmelCase , temb=_UpperCAmelCase)
# 5. post-process
if self.out_block:
__A : Dict = self.out_block(_UpperCAmelCase , _UpperCAmelCase)
if not return_dict:
return (sample,)
return UNetaDOutput(sample=_UpperCAmelCase) | 8 | 1 |
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase__ : Optional[Any] = logging.get_logger(__name__)
lowercase__ : Optional[int] = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
}
lowercase__ : Dict = {
'''vocab_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'''},
'''merges_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'''},
}
lowercase__ : Optional[int] = {
'''ctrl''': 2_56,
}
lowercase__ : Tuple = {
'''Pregnancy''': 16_86_29,
'''Christianity''': 76_75,
'''Explain''': 10_64_23,
'''Fitness''': 6_34_40,
'''Saving''': 6_31_63,
'''Ask''': 2_71_71,
'''Ass''': 9_59_85,
'''Joke''': 16_35_09,
'''Questions''': 4_56_22,
'''Thoughts''': 4_96_05,
'''Retail''': 5_23_42,
'''Feminism''': 16_43_38,
'''Writing''': 1_19_92,
'''Atheism''': 19_22_63,
'''Netflix''': 4_86_16,
'''Computing''': 3_96_39,
'''Opinion''': 4_32_13,
'''Alone''': 4_49_67,
'''Funny''': 5_89_17,
'''Gaming''': 4_03_58,
'''Human''': 40_88,
'''India''': 13_31,
'''Joker''': 7_71_38,
'''Diet''': 3_62_06,
'''Legal''': 1_18_59,
'''Norman''': 49_39,
'''Tip''': 7_26_89,
'''Weight''': 5_23_43,
'''Movies''': 4_62_73,
'''Running''': 2_34_25,
'''Science''': 20_90,
'''Horror''': 3_77_93,
'''Confession''': 6_05_72,
'''Finance''': 1_22_50,
'''Politics''': 1_63_60,
'''Scary''': 19_19_85,
'''Support''': 1_26_54,
'''Technologies''': 3_25_16,
'''Teenage''': 6_61_60,
'''Event''': 3_27_69,
'''Learned''': 6_74_60,
'''Notion''': 18_27_70,
'''Wikipedia''': 3_75_83,
'''Books''': 66_65,
'''Extract''': 7_60_50,
'''Confessions''': 10_27_01,
'''Conspiracy''': 7_59_32,
'''Links''': 6_36_74,
'''Narcissus''': 15_04_25,
'''Relationship''': 5_47_66,
'''Relationships''': 13_47_96,
'''Reviews''': 4_16_71,
'''News''': 42_56,
'''Translation''': 2_68_20,
'''multilingual''': 12_84_06,
}
def _lowerCAmelCase ( __snake_case : Optional[Any] ) -> Dict:
__A : str = set()
__A : Tuple = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__A : List[str] = char
__A : Optional[int] = set(__snake_case )
return pairs
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = CONTROL_CODES
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase="<unk>" , **_UpperCAmelCase):
'''simple docstring'''
super().__init__(unk_token=_UpperCAmelCase , **_UpperCAmelCase)
with open(_UpperCAmelCase , encoding='utf-8') as vocab_handle:
__A : Any = json.load(_UpperCAmelCase)
__A : Dict = {v: k for k, v in self.encoder.items()}
with open(_UpperCAmelCase , encoding='utf-8') as merges_handle:
__A : Dict = merges_handle.read().split('\n')[1:-1]
__A : Optional[Any] = [tuple(merge.split()) for merge in merges]
__A : int = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase))))
__A : int = {}
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return len(self.encoder)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
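        # Byte-pair encoding: repeatedly merge the lowest-ranked adjacent symbol pair until no known merge remains.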
if token in self.cache:
return self.cache[token]
__A : Optional[int] = tuple(_UpperCAmelCase)
__A : str = tuple(list(word[:-1]) + [word[-1] + '</w>'])
__A : str = get_pairs(_UpperCAmelCase)
if not pairs:
return token
while True:
__A : List[str] = min(_UpperCAmelCase , key=lambda _UpperCAmelCase: self.bpe_ranks.get(_UpperCAmelCase , float('inf')))
if bigram not in self.bpe_ranks:
break
__A ,__A : Any = bigram
__A : Optional[Any] = []
__A : int = 0
while i < len(_UpperCAmelCase):
try:
__A : Any = word.index(_UpperCAmelCase , _UpperCAmelCase)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
__A : List[str] = j
if word[i] == first and i < len(_UpperCAmelCase) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
__A : str = tuple(_UpperCAmelCase)
__A : Dict = new_word
if len(_UpperCAmelCase) == 1:
break
else:
__A : List[str] = get_pairs(_UpperCAmelCase)
__A : Any = '@@ '.join(_UpperCAmelCase)
__A : List[str] = word[:-4]
__A : Any = word
return word
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A : Optional[int] = []
__A : Dict = re.findall(R'\S+\n?' , _UpperCAmelCase)
for token in words:
split_tokens.extend(list(self.bpe(_UpperCAmelCase).split(' ')))
return split_tokens
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
return self.encoder.get(_UpperCAmelCase , self.encoder.get(self.unk_token))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
return self.decoder.get(_UpperCAmelCase , self.unk_token)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A : List[Any] = ' '.join(_UpperCAmelCase).replace('@@ ' , '').strip()
return out_string
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None):
'''simple docstring'''
if not os.path.isdir(_UpperCAmelCase):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
__A : Tuple = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
__A : Tuple = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
with open(_UpperCAmelCase , 'w' , encoding='utf-8') as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_UpperCAmelCase , ensure_ascii=_UpperCAmelCase) + '\n')
__A : Any = 0
with open(_UpperCAmelCase , 'w' , encoding='utf-8') as writer:
writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
' Please check that the tokenizer is not corrupted!')
__A : List[str] = token_index
writer.write(' '.join(_UpperCAmelCase) + '\n')
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far) | 8 |
'''simple docstring'''
def _lowerCAmelCase ( __snake_case : str , __snake_case : str ) -> int:
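    # Hamming distance: count the positions at which the two equal-length strings differ.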
if len(__snake_case ) != len(__snake_case ):
raise ValueError('String lengths must match!' )
__A : Optional[Any] = 0
    for char_a, char_b in zip(__snake_case , __snake_case ):
        if char_a != char_b:
            count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 | 1 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=16 , _UpperCAmelCase=[1, 2, 1] , _UpperCAmelCase=[2, 2, 4] , _UpperCAmelCase=2 , _UpperCAmelCase=2.0 , _UpperCAmelCase=True , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.1 , _UpperCAmelCase="gelu" , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-5 , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=10 , _UpperCAmelCase=8 , ):
'''simple docstring'''
__A : str = parent
__A : int = batch_size
__A : Optional[Any] = image_size
__A : Dict = patch_size
__A : List[str] = num_channels
__A : List[str] = embed_dim
__A : Any = depths
__A : str = num_heads
__A : List[str] = window_size
__A : Union[str, Any] = mlp_ratio
__A : Union[str, Any] = qkv_bias
__A : int = hidden_dropout_prob
__A : Any = attention_probs_dropout_prob
__A : Optional[int] = drop_path_rate
__A : List[Any] = hidden_act
__A : Optional[Any] = use_absolute_embeddings
__A : Optional[int] = patch_norm
__A : List[Any] = layer_norm_eps
__A : int = initializer_range
__A : str = is_training
__A : Dict = scope
__A : Dict = use_labels
__A : int = type_sequence_label_size
__A : Union[str, Any] = encoder_stride
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
__A : Dict = None
if self.use_labels:
__A : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__A : int = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return SwinvaConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Union[str, Any] = SwinvaModel(config=_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
__A : int = model(_UpperCAmelCase)
__A : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
__A : Union[str, Any] = int(config.embed_dim * 2 ** (len(config.depths) - 1))
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Optional[Any] = SwinvaForMaskedImageModeling(config=_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
__A : Tuple = model(_UpperCAmelCase)
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
__A : Dict = 1
__A : Any = SwinvaForMaskedImageModeling(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
__A : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
__A : List[Any] = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : int = self.type_sequence_label_size
__A : Union[str, Any] = SwinvaForImageClassification(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
__A : Optional[int] = model(_UpperCAmelCase , labels=_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = self.prepare_config_and_inputs()
__A ,__A ,__A : Tuple = config_and_inputs
__A : Union[str, Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE (a__ , a__ , unittest.TestCase ):
lowerCAmelCase = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
lowerCAmelCase = (
{'''feature-extraction''': SwinvaModel, '''image-classification''': SwinvaForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = SwinvaModelTester(self)
__A : Any = ConfigTester(self , config_class=_UpperCAmelCase , embed_dim=37)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase)
@unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
@unittest.skip(reason='Swinv2 does not use inputs_embeds')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : Union[str, Any] = model_class(_UpperCAmelCase)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
__A : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : str = model_class(_UpperCAmelCase)
__A : int = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A : List[Any] = [*signature.parameters.keys()]
__A : int = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
__A : List[Any] = True
for model_class in self.all_model_classes:
__A : Optional[Any] = True
__A : int = False
__A : List[Any] = True
__A : List[Any] = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : List[str] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Any = outputs.attentions
__A : Any = len(self.model_tester.depths)
self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__A : str = True
__A : Union[str, Any] = config.window_size**2
__A : List[Any] = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : int = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : str = outputs.attentions
self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
__A : int = len(_UpperCAmelCase)
# Check attention is always last and order is fine
__A : int = True
__A : List[Any] = True
__A : Dict = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : List[Any] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
if hasattr(self.model_tester , 'num_hidden_states_types'):
__A : List[Any] = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
__A : str = 2
self.assertEqual(out_len + added_hidden_states , len(_UpperCAmelCase))
__A : Tuple = outputs.attentions
self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase)
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Dict = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : List[str] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Tuple = outputs.hidden_states
__A : str = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths) + 1)
self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase)
# Swinv2 has a different seq_length
__A : Any = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
__A : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
__A : Tuple = outputs.reshaped_hidden_states
self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase)
__A ,__A ,__A ,__A : Any = reshaped_hidden_states[0].shape
__A : Tuple = (
reshaped_hidden_states[0].view(_UpperCAmelCase , _UpperCAmelCase , height * width).permute(0 , 2 , 1)
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : int = self.model_tester.prepare_config_and_inputs_for_common()
__A : Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__A : int = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__A : List[str] = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
__A : Optional[int] = 3
__A : Tuple = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
__A : Any = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
__A : Any = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__A : str = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__A : Any = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , (padded_height, padded_width))
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__A : Any = True
self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , (padded_height, padded_width))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase)
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A : str = SwinvaModel.from_pretrained(_UpperCAmelCase)
self.assertIsNotNone(_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
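        # With initializer ranges zeroed out, every trainable non-embedding parameter should be exactly 0 or 1.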
__A ,__A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__A : str = _config_zero_init(_UpperCAmelCase)
for model_class in self.all_model_classes:
__A : List[str] = model_class(config=_UpperCAmelCase)
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256')
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256').to(
_UpperCAmelCase)
__A : Any = self.default_image_processor
__A : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
__A : Any = image_processor(images=_UpperCAmelCase , return_tensors='pt').to(_UpperCAmelCase)
# forward pass
with torch.no_grad():
__A : Tuple = model(**_UpperCAmelCase)
# verify the logits
__A : Optional[int] = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape , _UpperCAmelCase)
__A : Optional[Any] = torch.tensor([-0.3947, -0.4306, 0.0026]).to(_UpperCAmelCase)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4)) | 8 |
'''simple docstring'''
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : Tuple = logging.get_logger(__name__)
def _lowerCAmelCase ( __snake_case : str , __snake_case : str ) -> Union[str, Any]:
__A : int = RobertaPreLayerNormConfig.from_pretrained(
__snake_case , architectures=['RobertaPreLayerNormForMaskedLM'] )
# convert state_dict
__A : Tuple = torch.load(hf_hub_download(repo_id=__snake_case , filename='pytorch_model.bin' ) )
__A : str = {}
for tensor_key, tensor_value in original_state_dict.items():
        # The transformers implementation gives the model a unique name, rather than overwriting 'roberta'
if tensor_key.startswith('roberta.' ):
__A : Dict = 'roberta_prelayernorm.' + tensor_key[len('roberta.' ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith('.self.LayerNorm.weight' ) or tensor_key.endswith('.self.LayerNorm.bias' ):
continue
__A : str = tensor_value
__A : Union[str, Any] = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=__snake_case , config=__snake_case , state_dict=__snake_case )
model.save_pretrained(__snake_case )
# convert tokenizer
__A : List[Any] = AutoTokenizer.from_pretrained(__snake_case )
tokenizer.save_pretrained(__snake_case )
if __name__ == "__main__":
lowercase__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint-repo''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowercase__ : Optional[Any] = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path) | 8 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
def _lowerCAmelCase ( __snake_case : float , __snake_case : int ) -> float:
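    # Compute u * (u - 1) * ... * (u - n + 1), the coefficient term in Newton's forward-difference formula.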
__A : int = u
for i in range(1 , __snake_case ):
__A : Optional[int] = temp * (u - i)
return temp
def _lowerCAmelCase ( ) -> None:
    __A : Dict = int(input('enter the number of values: ' ) )
__A : list[list[float]] = []
for _ in range(__snake_case ):
y.append([] )
for i in range(__snake_case ):
for j in range(__snake_case ):
y[i].append(__snake_case )
__A : int = 0
print('enter the values of parameters in a list: ' )
__A : List[str] = list(map(__snake_case , input().split() ) )
print('enter the values of corresponding parameters: ' )
for i in range(__snake_case ):
__A : Tuple = float(input() )
__A : Tuple = int(input('enter the value to interpolate: ' ) )
__A : Dict = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , __snake_case ):
for j in range(n - i ):
__A : Dict = y[j + 1][i - 1] - y[j][i - 1]
__A : List[Any] = y[0][0]
for i in range(1 , __snake_case ):
summ += (ucal(__snake_case , __snake_case ) * y[0][i]) / math.factorial(__snake_case )
print(f'the value at {value} is {summ}' )
if __name__ == "__main__":
main() | 8 |
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
lowercase__ : Dict = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = field(default=a__ , metadata={'''help''': '''Whether to use SortishSampler or not.'''} )
lowerCAmelCase = field(
default=a__ , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
lowerCAmelCase = field(
default=a__ , metadata={
'''help''': (
'''The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `max_length` value of the model configuration.'''
)
} , )
lowerCAmelCase = field(
default=a__ , metadata={
'''help''': (
'''The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `num_beams` value of the model configuration.'''
)
} , )
lowerCAmelCase = field(
default=a__ , metadata={
'''help''': '''Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'''
} , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = super().to_dict()
for k, v in d.items():
if isinstance(_UpperCAmelCase , _UpperCAmelCase):
__A : List[Any] = v.to_dict()
return d | 8 | 1 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
lowercase__ : Optional[Any] = '''Run commands across TPU VMs for initial setup before running `accelerate launch`.'''
def _lowerCAmelCase ( __snake_case : str=None ) -> int:
if subparsers is not None:
__A : Tuple = subparsers.add_parser('tpu-config' , description=_description )
else:
__A : Dict = argparse.ArgumentParser('Accelerate tpu-config command' , description=_description )
# Core arguments
__A : Dict = parser.add_argument_group(
'Config Arguments' , 'Arguments that can be configured through `accelerate config`.' )
config_args.add_argument(
'--config_file' , type=__snake_case , default=__snake_case , help='Path to the config file to use for accelerate.' , )
config_args.add_argument(
'--tpu_name' , default=__snake_case , help='The name of the TPU to use. If not specified, will use the TPU specified in the config file.' , )
config_args.add_argument(
'--tpu_zone' , default=__snake_case , help='The zone of the TPU to use. If not specified, will use the zone specified in the config file.' , )
__A : Optional[Any] = parser.add_argument_group('TPU Arguments' , 'Arguments for options ran inside the TPU.' )
pod_args.add_argument(
'--use_alpha' , action='store_true' , help='Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.' , )
pod_args.add_argument(
'--command_file' , default=__snake_case , help='The path to the file containing the commands to run on the pod on startup.' , )
pod_args.add_argument(
'--command' , action='append' , nargs='+' , help='A command to run on the pod. Can be passed multiple times.' , )
pod_args.add_argument(
'--install_accelerate' , action='store_true' , help='Whether to install accelerate on the pod. Defaults to False.' , )
pod_args.add_argument(
'--accelerate_version' , default='latest' , help='The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.' , )
pod_args.add_argument(
'--debug' , action='store_true' , help='If set, will print the command that would be run instead of running it.' )
if subparsers is not None:
parser.set_defaults(func=__snake_case )
return parser
def _lowerCAmelCase ( __snake_case : Dict ) -> Optional[int]:
__A : Tuple = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(__snake_case ):
__A : Tuple = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
__A : Optional[int] = defaults.command_file
if not args.command and defaults.commands is not None:
__A : List[str] = defaults.commands
if not args.tpu_name:
__A : Tuple = defaults.tpu_name
if not args.tpu_zone:
__A : Union[str, Any] = defaults.tpu_zone
if args.accelerate_version == "dev":
__A : Union[str, Any] = 'git+https://github.com/huggingface/accelerate.git'
elif args.accelerate_version == "latest":
__A : Optional[Any] = 'accelerate -U'
elif isinstance(parse(args.accelerate_version ) , __snake_case ):
__A : Tuple = f'accelerate=={args.accelerate_version}'
if not args.command_file and not args.command:
raise ValueError('You must specify either a command file or a command to run on the pod.' )
if args.command_file:
with open(args.command_file , 'r' ) as f:
__A : str = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , __snake_case ):
__A : int = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
__A : Any = ['cd /usr/share']
if args.install_accelerate:
new_cmd += [f'pip install {args.accelerate_version}']
new_cmd += args.command
__A : List[str] = '; '.join(__snake_case )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
__A : Dict = ['gcloud']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(f'Running {" ".join(__snake_case )}' )
return
subprocess.run(__snake_case )
print('Successfully setup pod.' )
def _lowerCAmelCase ( ) -> List[str]:
__A : int = tpu_command_parser()
__A : Union[str, Any] = parser.parse_args()
tpu_command_launcher(__snake_case ) | 8 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : List[Any] = logging.get_logger(__name__)
lowercase__ : Optional[int] = {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = '''lxmert'''
lowerCAmelCase = {}
def __init__( self , _UpperCAmelCase=3_0522 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=9500 , _UpperCAmelCase=1600 , _UpperCAmelCase=400 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=9 , _UpperCAmelCase=5 , _UpperCAmelCase=5 , _UpperCAmelCase=2048 , _UpperCAmelCase=4 , _UpperCAmelCase=6.67 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , **_UpperCAmelCase , ):
'''simple docstring'''
__A : Tuple = vocab_size
__A : int = hidden_size
__A : str = num_attention_heads
__A : Tuple = hidden_act
__A : int = intermediate_size
__A : str = hidden_dropout_prob
__A : Optional[int] = attention_probs_dropout_prob
__A : Optional[Any] = max_position_embeddings
__A : Tuple = type_vocab_size
__A : Optional[int] = initializer_range
__A : Any = layer_norm_eps
__A : Optional[Any] = num_qa_labels
__A : Optional[int] = num_object_labels
__A : Any = num_attr_labels
__A : Union[str, Any] = l_layers
__A : Optional[int] = x_layers
__A : List[Any] = r_layers
__A : Tuple = visual_feat_dim
__A : Tuple = visual_pos_dim
__A : Optional[int] = visual_loss_normalizer
__A : int = task_matched
__A : List[Any] = task_mask_lm
__A : Optional[Any] = task_obj_predict
__A : str = task_qa
__A : List[Any] = visual_obj_loss
__A : Optional[Any] = visual_attr_loss
__A : Union[str, Any] = visual_feat_loss
__A : Union[str, Any] = {'vision': r_layers, 'cross_encoder': x_layers, 'language': l_layers}
super().__init__(**_UpperCAmelCase) | 8 | 1 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
lowercase__ : Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(a__ )
class SCREAMING_SNAKE_CASE (a__ ):
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase)
requires_backends(self , 'vision')
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase=None):
'''simple docstring'''
__A : int = {}
if top_k is not None:
__A : Optional[int] = top_k
return {}, {}, postprocess_params
def __call__( self , _UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
return super().__call__(_UpperCAmelCase , **_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A : Optional[Any] = load_image(_UpperCAmelCase)
__A : List[Any] = self.image_processor(images=_UpperCAmelCase , return_tensors=self.framework)
return model_inputs
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A : Dict = self.model(**_UpperCAmelCase)
return model_outputs
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase=5):
'''simple docstring'''
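        # Clamp top_k to the number of labels, then return the highest-probability labels for the active framework.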
if top_k > self.model.config.num_labels:
__A : Optional[int] = self.model.config.num_labels
if self.framework == "pt":
__A : List[str] = model_outputs.logits.softmax(-1)[0]
__A ,__A : Optional[int] = probs.topk(_UpperCAmelCase)
elif self.framework == "tf":
__A : Tuple = stable_softmax(model_outputs.logits , axis=-1)[0]
__A : Dict = tf.math.top_k(_UpperCAmelCase , k=_UpperCAmelCase)
__A ,__A : Optional[int] = topk.values.numpy(), topk.indices.numpy()
else:
raise ValueError(F'Unsupported framework: {self.framework}')
__A : List[Any] = scores.tolist()
__A : List[str] = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(_UpperCAmelCase , _UpperCAmelCase)] | 8 |
'''simple docstring'''
import math
import sys
def _lowerCAmelCase ( __snake_case : int ) -> int:
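    # Bottom-up DP: answers[i] = 1 + min(answers[i - j*j]) over all perfect squares j*j <= i.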
if number != int(__snake_case ):
raise ValueError('the value of input must be a natural number' )
if number < 0:
raise ValueError('the value of input must not be a negative number' )
if number == 0:
return 1
__A : str = [-1] * (number + 1)
__A : Dict = 0
for i in range(1 , number + 1 ):
__A : int = sys.maxsize
__A : int = int(math.sqrt(__snake_case ) )
for j in range(1 , root + 1 ):
__A : str = 1 + answers[i - (j**2)]
__A : Dict = min(__snake_case , __snake_case )
__A : Union[str, Any] = answer
return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 | 1 |
'''simple docstring'''
def _lowerCAmelCase ( __snake_case : int , __snake_case : list ) -> Union[str, Any]:
_enforce_args(__snake_case , __snake_case )
if n == 0:
return 0
__A : Any = float('-inf' )
for i in range(1 , n + 1 ):
__A : Optional[int] = max(
__snake_case , prices[i - 1] + naive_cut_rod_recursive(n - i , __snake_case ) )
return max_revue
def _lowerCAmelCase ( __snake_case : int , __snake_case : list ) -> Optional[int]:
_enforce_args(__snake_case , __snake_case )
__A : str = [float('-inf' ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(__snake_case , __snake_case , __snake_case )
def _lowerCAmelCase ( __snake_case : int , __snake_case : list , __snake_case : list ) -> Any:
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
__A : str = float('-inf' )
for i in range(1 , n + 1 ):
__A : List[str] = max(
__snake_case , prices[i - 1] + _top_down_cut_rod_recursive(n - i , __snake_case , __snake_case ) , )
__A : Optional[int] = max_revenue
return max_rev[n]
def _lowerCAmelCase ( __snake_case : int , __snake_case : list ) -> Any:
_enforce_args(__snake_case , __snake_case )
    # len(max_rev) == n + 1, to accommodate the revenue obtainable from a rod of
    # length 0.
__A : Union[str, Any] = [float('-inf' ) for _ in range(n + 1 )]
__A : Optional[int] = 0
for i in range(1 , n + 1 ):
__A : Optional[Any] = max_rev[i]
for j in range(1 , i + 1 ):
__A : int = max(__snake_case , prices[j - 1] + max_rev[i - j] )
__A : Optional[Any] = max_revenue_i
return max_rev[n]
def _lowerCAmelCase ( __snake_case : int , __snake_case : list ) -> List[Any]:
if n < 0:
__A : Optional[Any] = f'n must be greater than or equal to 0. Got n = {n}'
raise ValueError(__snake_case )
if n > len(__snake_case ):
__A : Optional[int] = (
'Each integral piece of rod must have a corresponding price. '
f'Got n = {n} but length of prices = {len(__snake_case )}'
)
raise ValueError(__snake_case )
def _lowerCAmelCase ( ) -> Tuple:
__A : Dict = [6, 10, 12, 15, 20, 23]
__A : Tuple = len(__snake_case )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
__A : int = 36
__A : str = top_down_cut_rod(__snake_case , __snake_case )
__A : Optional[int] = bottom_up_cut_rod(__snake_case , __snake_case )
__A : Optional[int] = naive_cut_rod_recursive(__snake_case , __snake_case )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main() | 8 |
'''simple docstring'''
from __future__ import annotations
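# Fractional knapsack: greedily take items in decreasing value/weight ratio, splitting the last item to fill the remaining capacity.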
def _lowerCAmelCase ( __snake_case : list[int] , __snake_case : list[int] , __snake_case : int ) -> tuple[float, list[float]]:
__A : int = list(range(len(__snake_case ) ) )
__A : Optional[Any] = [v / w for v, w in zip(__snake_case , __snake_case )]
index.sort(key=lambda __snake_case : ratio[i] , reverse=__snake_case )
__A : float = 0
__A : list[float] = [0] * len(__snake_case )
for i in index:
if weight[i] <= capacity:
__A : Optional[int] = 1
max_value += value[i]
capacity -= weight[i]
else:
__A : List[Any] = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 | 1 |
'''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
lowercase__ , lowercase__ , lowercase__ : Optional[int] = False, False, False
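# Audio feature: encodes audio samples into {"bytes", "path"} structs and decodes them back into arrays with soundfile/librosa.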
@dataclass
class SCREAMING_SNAKE_CASE :
lowerCAmelCase = None
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = None
# Automatically constructed
lowerCAmelCase = "dict"
lowerCAmelCase = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} )
lowerCAmelCase = field(default='''Audio''' , init=a__ , repr=a__ )
def __call__( self):
'''simple docstring'''
return self.pa_type
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError('To support encoding audio data, please install \'soundfile\'.') from err
if isinstance(_UpperCAmelCase , _UpperCAmelCase):
return {"bytes": None, "path": value}
elif isinstance(_UpperCAmelCase , _UpperCAmelCase):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
__A : Optional[int] = BytesIO()
sf.write(_UpperCAmelCase , value['array'] , value['sampling_rate'] , format='wav')
return {"bytes": buffer.getvalue(), "path": None}
elif value.get('path') is not None and os.path.isfile(value['path']):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith('pcm'):
# "PCM" only has raw audio bytes
if value.get('sampling_rate') is None:
                    # Converting raw PCM bytes to WAV bytes requires knowing the sampling rate
raise KeyError('To use PCM files, please specify a \'sampling_rate\' in Audio object')
if value.get('bytes'):
                    # If we already have the PCM bytes, we don't need to read the file again (just use them!)
__A : Tuple = np.frombuffer(value['bytes'] , dtype=np.intaa).astype(np.floataa) / 3_2767
else:
__A : List[Any] = np.memmap(value['path'] , dtype='h' , mode='r').astype(np.floataa) / 3_2767
__A : Optional[Any] = BytesIO(bytes())
sf.write(_UpperCAmelCase , _UpperCAmelCase , value['sampling_rate'] , format='wav')
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get('path')}
elif value.get('bytes') is not None or value.get('path') is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get('bytes'), "path": value.get('path')}
else:
raise ValueError(
F'An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.')
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None):
'''simple docstring'''
if not self.decode:
raise RuntimeError('Decoding is disabled for this feature. Please use Audio(decode=True) instead.')
__A ,__A : Any = (value['path'], BytesIO(value['bytes'])) if value['bytes'] is not None else (value['path'], None)
if path is None and file is None:
raise ValueError(F'An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.')
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError('To support decoding audio files, please install \'librosa\' and \'soundfile\'.') from err
__A : Tuple = xsplitext(_UpperCAmelCase)[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
'Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, '
'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ')
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
'Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, '
'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ')
if file is None:
__A : List[str] = token_per_repo_id or {}
__A : Optional[int] = path.split('::')[-1]
try:
__A : str = string_to_dict(_UpperCAmelCase , config.HUB_DATASETS_URL)['repo_id']
__A : Union[str, Any] = token_per_repo_id[repo_id]
except (ValueError, KeyError):
__A : List[Any] = None
with xopen(_UpperCAmelCase , 'rb' , use_auth_token=_UpperCAmelCase) as f:
__A ,__A : Optional[Any] = sf.read(_UpperCAmelCase)
else:
__A ,__A : Tuple = sf.read(_UpperCAmelCase)
__A : Union[str, Any] = array.T
if self.mono:
__A : Optional[Any] = librosa.to_mono(_UpperCAmelCase)
if self.sampling_rate and self.sampling_rate != sampling_rate:
__A : List[Any] = librosa.resample(_UpperCAmelCase , orig_sr=_UpperCAmelCase , target_sr=self.sampling_rate)
__A : Optional[Any] = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
from .features import Value
if self.decode:
raise ValueError('Cannot flatten a decoded Audio feature.')
return {
"bytes": Value('binary'),
"path": Value('string'),
}
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
if pa.types.is_string(storage.type):
__A : str = pa.array([None] * len(_UpperCAmelCase) , type=pa.binary())
__A : Optional[int] = pa.StructArray.from_arrays([bytes_array, storage] , ['bytes', 'path'] , mask=storage.is_null())
elif pa.types.is_binary(storage.type):
__A : Optional[Any] = pa.array([None] * len(_UpperCAmelCase) , type=pa.string())
__A : str = pa.StructArray.from_arrays([storage, path_array] , ['bytes', 'path'] , mask=storage.is_null())
elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices('array'):
__A : Dict = pa.array([Audio().encode_example(_UpperCAmelCase) if x is not None else None for x in storage.to_pylist()])
elif pa.types.is_struct(storage.type):
if storage.type.get_field_index('bytes') >= 0:
__A : Tuple = storage.field('bytes')
else:
__A : Any = pa.array([None] * len(_UpperCAmelCase) , type=pa.binary())
if storage.type.get_field_index('path') >= 0:
__A : List[Any] = storage.field('path')
else:
__A : Dict = pa.array([None] * len(_UpperCAmelCase) , type=pa.string())
__A : Optional[Any] = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=storage.is_null())
return array_cast(_UpperCAmelCase , self.pa_type)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
@no_op_if_value_is_null
def path_to_bytes(_UpperCAmelCase):
with xopen(_UpperCAmelCase , 'rb') as f:
__A : Union[str, Any] = f.read()
return bytes_
__A : int = pa.array(
[
(path_to_bytes(x['path']) if x['bytes'] is None else x['bytes']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
__A : Optional[int] = pa.array(
[os.path.basename(_UpperCAmelCase) if path is not None else None for path in storage.field('path').to_pylist()] , type=pa.string() , )
__A : Any = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=bytes_array.is_null())
return array_cast(_UpperCAmelCase , self.pa_type) | 8 |
'''simple docstring'''
from __future__ import annotations
import math
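# Max segment tree with lazy propagation: supports range-assignment updates and range-maximum queries.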
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase):
'''simple docstring'''
__A : int = size
        # approximate the overall size of the segment tree for the given input size
__A : Optional[Any] = [0 for i in range(0 , 4 * size)]
# create array to store lazy update
__A : Optional[Any] = [0 for i in range(0 , 4 * size)]
__A : str = [0 for i in range(0 , 4 * size)] # flag for lazy update
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
return idx * 2
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
return idx * 2 + 1
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
if left_element == right_element:
__A : List[Any] = a[left_element - 1]
else:
__A : List[str] = (left_element + right_element) // 2
self.build(self.left(_UpperCAmelCase) , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
self.build(self.right(_UpperCAmelCase) , mid + 1 , _UpperCAmelCase , _UpperCAmelCase)
__A : Any = max(
self.segment_tree[self.left(_UpperCAmelCase)] , self.segment_tree[self.right(_UpperCAmelCase)])
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
if self.flag[idx] is True:
__A : Optional[Any] = self.lazy[idx]
__A : Optional[Any] = False
if left_element != right_element:
__A : List[Any] = self.lazy[idx]
__A : Dict = self.lazy[idx]
__A : Tuple = True
__A : Union[str, Any] = True
if right_element < a or left_element > b:
return True
if left_element >= a and right_element <= b:
__A : Optional[int] = val
if left_element != right_element:
__A : Tuple = val
__A : Any = val
__A : Tuple = True
__A : Union[str, Any] = True
return True
__A : str = (left_element + right_element) // 2
self.update(self.left(_UpperCAmelCase) , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
self.update(self.right(_UpperCAmelCase) , mid + 1 , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
__A : int = max(
self.segment_tree[self.left(_UpperCAmelCase)] , self.segment_tree[self.right(_UpperCAmelCase)])
return True
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
if self.flag[idx] is True:
__A : Union[str, Any] = self.lazy[idx]
__A : List[str] = False
if left_element != right_element:
__A : Union[str, Any] = self.lazy[idx]
__A : Optional[int] = self.lazy[idx]
__A : str = True
__A : Union[str, Any] = True
if right_element < a or left_element > b:
return -math.inf
if left_element >= a and right_element <= b:
return self.segment_tree[idx]
__A : Any = (left_element + right_element) // 2
__A : int = self.query(self.left(_UpperCAmelCase) , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
__A : Union[str, Any] = self.query(self.right(_UpperCAmelCase) , mid + 1 , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
return max(_UpperCAmelCase , _UpperCAmelCase)
def __str__( self):
'''simple docstring'''
return str([self.query(1 , 1 , self.size , _UpperCAmelCase , _UpperCAmelCase) for i in range(1 , self.size + 1)])
if __name__ == "__main__":
lowercase__ : Union[str, Any] = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
lowercase__ : str = 15
lowercase__ : List[Any] = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 1_11)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 2_35)
print(segt) | 8 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
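# Tests for the BioGPT BPE tokenizer, built from a tiny handcrafted vocab and merges file.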
class SCREAMING_SNAKE_CASE (a__ , unittest.TestCase ):
lowerCAmelCase = BioGptTokenizer
lowerCAmelCase = False
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__A : Optional[int] = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
__A : Union[str, Any] = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase))))
__A : str = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
__A : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
__A : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file , 'w') as fp:
fp.write(json.dumps(_UpperCAmelCase))
with open(self.merges_file , 'w') as fp:
fp.write('\n'.join(_UpperCAmelCase))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A : int = 'lower newer'
__A : int = 'lower newer'
return input_text, output_text
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = BioGptTokenizer(self.vocab_file , self.merges_file)
__A : List[Any] = 'lower'
__A : Optional[int] = ['low', 'er</w>']
__A : Optional[Any] = tokenizer.tokenize(_UpperCAmelCase)
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)
__A : List[Any] = tokens + ['<unk>']
__A : Optional[int] = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase) , _UpperCAmelCase)
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = BioGptTokenizer.from_pretrained('microsoft/biogpt')
__A : Union[str, Any] = tokenizer.encode('sequence builders' , add_special_tokens=_UpperCAmelCase)
__A : Any = tokenizer.encode('multi-sequence build' , add_special_tokens=_UpperCAmelCase)
__A : Any = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase)
__A : List[str] = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase , _UpperCAmelCase)
self.assertTrue(encoded_sentence == [2] + text)
self.assertTrue(encoded_pair == [2] + text + [2] + text_a) | 8 |
'''simple docstring'''
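# Sum of an arithmetic series: S = n/2 * (2*a + (n - 1)*d), where a is the first term and d the common difference.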
def _lowerCAmelCase ( __snake_case : int , __snake_case : int , __snake_case : int ) -> float:
__A : Dict = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
# formula for sum of series
return total
def _lowerCAmelCase ( ) -> Union[str, Any]:
print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
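# Tests for the Stable Diffusion x2 latent upscaler pipeline: fast tests with dummy components, plus slow GPU integration tests.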
def _lowerCAmelCase ( __snake_case : Union[str, Any] ) -> Optional[int]:
__A : str = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
class SCREAMING_SNAKE_CASE (a__ , a__ , a__ , unittest.TestCase ):
lowerCAmelCase = StableDiffusionLatentUpscalePipeline
lowerCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'''height''',
'''width''',
'''cross_attention_kwargs''',
'''negative_prompt_embeds''',
'''prompt_embeds''',
}
lowerCAmelCase = PipelineTesterMixin.required_optional_params - {'''num_images_per_prompt'''}
lowerCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCAmelCase = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowerCAmelCase = frozenset([] )
lowerCAmelCase = True
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = 1
__A : str = 4
__A : Dict = (16, 16)
__A : List[Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0)).to(_UpperCAmelCase)
return image
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
torch.manual_seed(0)
__A : Dict = UNetaDConditionModel(
act_fn='gelu' , attention_head_dim=8 , norm_num_groups=_UpperCAmelCase , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=160 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
'KDownBlock2D',
'KCrossAttnDownBlock2D',
'KCrossAttnDownBlock2D',
'KCrossAttnDownBlock2D',
) , in_channels=8 , mid_block_type=_UpperCAmelCase , only_cross_attention=_UpperCAmelCase , out_channels=5 , resnet_time_scale_shift='scale_shift' , time_embedding_type='fourier' , timestep_post_act='gelu' , up_block_types=('KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KUpBlock2D') , )
__A : int = AutoencoderKL(
block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
'DownEncoderBlock2D',
'DownEncoderBlock2D',
'DownEncoderBlock2D',
'DownEncoderBlock2D',
] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
__A : List[Any] = EulerDiscreteScheduler(prediction_type='sample')
__A : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='quick_gelu' , projection_dim=512 , )
__A : List[str] = CLIPTextModel(_UpperCAmelCase)
__A : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
__A : List[str] = {
'unet': model.eval(),
'vae': vae.eval(),
'scheduler': scheduler,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase=0):
'''simple docstring'''
if str(_UpperCAmelCase).startswith('mps'):
__A : Optional[Any] = torch.manual_seed(_UpperCAmelCase)
else:
__A : Tuple = torch.Generator(device=_UpperCAmelCase).manual_seed(_UpperCAmelCase)
__A : List[str] = {
'prompt': 'A painting of a squirrel eating a burger',
'image': self.dummy_image.cpu(),
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[Any] = 'cpu'
__A : str = self.get_dummy_components()
__A : Optional[Any] = self.pipeline_class(**_UpperCAmelCase)
pipe.to(_UpperCAmelCase)
pipe.set_progress_bar_config(disable=_UpperCAmelCase)
__A : Optional[Any] = self.get_dummy_inputs(_UpperCAmelCase)
__A : List[str] = pipe(**_UpperCAmelCase).images
__A : Union[str, Any] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 256, 256, 3))
__A : Any = np.array(
[0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055])
__A : List[Any] = np.abs(image_slice.flatten() - expected_slice).max()
self.assertLessEqual(_UpperCAmelCase , 1e-3)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=7e-3)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
super().test_save_load_local(expected_max_difference=3e-3)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
super().test_save_load_optional_components(expected_max_difference=3e-3)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = [
'DDIMScheduler',
'DDPMScheduler',
'PNDMScheduler',
'HeunDiscreteScheduler',
'EulerAncestralDiscreteScheduler',
'KDPM2DiscreteScheduler',
'KDPM2AncestralDiscreteScheduler',
'DPMSolverSDEScheduler',
]
__A : int = self.get_dummy_components()
__A : Union[str, Any] = self.pipeline_class(**_UpperCAmelCase)
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=_UpperCAmelCase)
pipe.to(_UpperCAmelCase)
pipe.set_progress_bar_config(disable=_UpperCAmelCase)
__A : int = self.get_dummy_inputs(_UpperCAmelCase)
__A : Union[str, Any] = 2
__A : List[str] = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
                # these schedulers are skipped because the pipeline does not support them
continue
__A : List[str] = getattr(_UpperCAmelCase , scheduler_enum.name)
__A : Optional[Any] = scheduler_cls.from_config(pipe.scheduler.config)
__A : Any = pipe(**_UpperCAmelCase)[0]
outputs.append(_UpperCAmelCase)
assert check_same_shape(_UpperCAmelCase)
@require_torch_gpu
@slow
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : str = torch.manual_seed(33)
__A : List[str] = StableDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' , torch_dtype=torch.floataa)
pipe.to('cuda')
__A : List[Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
'stabilityai/sd-x2-latent-upscaler' , torch_dtype=torch.floataa)
upscaler.to('cuda')
__A : Any = 'a photo of an astronaut high resolution, unreal engine, ultra realistic'
__A : Any = pipe(_UpperCAmelCase , generator=_UpperCAmelCase , output_type='latent').images
__A : int = upscaler(
prompt=_UpperCAmelCase , image=_UpperCAmelCase , num_inference_steps=20 , guidance_scale=0 , generator=_UpperCAmelCase , output_type='np' , ).images[0]
__A : Optional[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy')
assert np.abs((expected_image - image).mean()) < 5e-2
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = torch.manual_seed(33)
__A : Union[str, Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
'stabilityai/sd-x2-latent-upscaler' , torch_dtype=torch.floataa)
upscaler.to('cuda')
__A : Any = 'the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'
__A : Dict = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png')
__A : int = upscaler(
prompt=_UpperCAmelCase , image=_UpperCAmelCase , num_inference_steps=20 , guidance_scale=0 , generator=_UpperCAmelCase , output_type='np' , ).images[0]
__A : List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy')
assert np.abs((expected_image - image).max()) < 5e-2 | 8 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
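# TF ConvBERT tests: a shared tester builds configs and inputs for each task head exercised below.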
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , ):
'''simple docstring'''
__A : Optional[int] = parent
__A : str = 13
__A : List[Any] = 7
__A : List[str] = True
__A : str = True
__A : Optional[Any] = True
__A : int = True
__A : Dict = 99
__A : Dict = 384
__A : Any = 2
__A : int = 4
__A : Optional[Any] = 37
__A : Optional[int] = 'gelu'
__A : Dict = 0.1
__A : Optional[int] = 0.1
__A : Any = 512
__A : int = 16
__A : List[str] = 2
__A : str = 0.02
__A : Any = 3
__A : str = 4
__A : Union[str, Any] = 128
__A : int = 2
__A : List[Any] = 9
__A : List[Any] = 1
__A : List[Any] = None
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__A : str = None
if self.use_input_mask:
__A : List[Any] = random_attention_mask([self.batch_size, self.seq_length])
__A : Optional[Any] = None
if self.use_token_type_ids:
__A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__A : Optional[int] = None
__A : List[str] = None
__A : Dict = None
if self.use_labels:
__A : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__A : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__A : str = ids_tensor([self.batch_size] , self.num_choices)
__A : List[Any] = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_UpperCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : int = TFConvBertModel(config=_UpperCAmelCase)
__A : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__A : Tuple = [input_ids, input_mask]
__A : Any = model(_UpperCAmelCase)
__A : Dict = model(_UpperCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : str = TFConvBertForMaskedLM(config=_UpperCAmelCase)
__A : str = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : str = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Optional[int] = self.num_labels
__A : Any = TFConvBertForSequenceClassification(config=_UpperCAmelCase)
__A : Optional[Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : Dict = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Tuple = self.num_choices
__A : List[str] = TFConvBertForMultipleChoice(config=_UpperCAmelCase)
__A : int = tf.tile(tf.expand_dims(_UpperCAmelCase , 1) , (1, self.num_choices, 1))
__A : Optional[Any] = tf.tile(tf.expand_dims(_UpperCAmelCase , 1) , (1, self.num_choices, 1))
__A : List[Any] = tf.tile(tf.expand_dims(_UpperCAmelCase , 1) , (1, self.num_choices, 1))
__A : int = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__A : Optional[Any] = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : List[Any] = self.num_labels
__A : List[Any] = TFConvBertForTokenClassification(config=_UpperCAmelCase)
__A : str = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : int = model(_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Optional[Any] = TFConvBertForQuestionAnswering(config=_UpperCAmelCase)
__A : Any = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__A : Union[str, Any] = model(_UpperCAmelCase)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = self.prepare_config_and_inputs()
        __A ,__A ,__A ,__A ,__A ,__A ,__A : Union[str, Any] = config_and_inputs
__A : List[str] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE (a__ , a__ , unittest.TestCase ):
lowerCAmelCase = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCAmelCase = (
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = TFConvBertModelTester(self)
__A : str = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase)
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__A : List[str] = True
__A : List[str] = True
if hasattr(_UpperCAmelCase , 'use_cache'):
__A : List[Any] = True
__A : str = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length)
__A : Union[str, Any] = getattr(self.model_tester , 'key_length' , _UpperCAmelCase)
for model_class in self.all_model_classes:
__A : List[str] = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase)
__A : Optional[int] = model_class(_UpperCAmelCase)
__A : Optional[Any] = len(model(_UpperCAmelCase))
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_UpperCAmelCase , saved_model=_UpperCAmelCase)
__A : Union[str, Any] = os.path.join(_UpperCAmelCase , 'saved_model' , '1')
__A : Tuple = tf.keras.models.load_model(_UpperCAmelCase)
__A : str = model(_UpperCAmelCase)
if self.is_encoder_decoder:
__A : Optional[int] = outputs['encoder_hidden_states']
__A : str = outputs['encoder_attentions']
else:
__A : List[Any] = outputs['hidden_states']
__A : Optional[Any] = outputs['attentions']
self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase)
__A : str = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase)
self.assertListEqual(
list(output_hidden_states[0].shape[-2:]) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(output_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
self.assertIsNotNone(_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__A : Any = True
__A : str = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length)
__A : Any = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length)
__A : int = getattr(self.model_tester , 'key_length' , _UpperCAmelCase)
__A : Tuple = getattr(self.model_tester , 'key_length' , _UpperCAmelCase)
def check_decoder_attentions_output(_UpperCAmelCase):
__A : List[str] = len(_UpperCAmelCase)
self.assertEqual(out_len % 2 , 0)
__A : Any = outputs.decoder_attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(_UpperCAmelCase):
__A : str = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__A : Dict = True
__A : Any = False
__A : str = model_class(_UpperCAmelCase)
__A : List[str] = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : List[str] = len(_UpperCAmelCase)
self.assertEqual(config.output_hidden_states , _UpperCAmelCase)
check_encoder_attentions_output(_UpperCAmelCase)
if self.is_encoder_decoder:
__A : Union[str, Any] = model_class(_UpperCAmelCase)
__A : int = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
self.assertEqual(config.output_hidden_states , _UpperCAmelCase)
check_decoder_attentions_output(_UpperCAmelCase)
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__A : int = True
__A : Tuple = model_class(_UpperCAmelCase)
__A : Dict = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
self.assertEqual(config.output_hidden_states , _UpperCAmelCase)
check_encoder_attentions_output(_UpperCAmelCase)
# Check attention is always last and order is fine
__A : Any = True
__A : str = True
__A : Union[str, Any] = model_class(_UpperCAmelCase)
__A : Union[str, Any] = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_UpperCAmelCase))
self.assertEqual(model.config.output_hidden_states , _UpperCAmelCase)
check_encoder_attentions_output(_UpperCAmelCase)
@require_tf
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
__A : str = tf.constant([[0, 1, 2, 3, 4, 5]])
__A : Optional[int] = model(_UpperCAmelCase)[0]
__A : List[Any] = [1, 6, 768]
self.assertEqual(output.shape , _UpperCAmelCase)
__A : Tuple = tf.constant(
[
[
[-0.03475493, -0.4686034, -0.30638832],
[0.22637248, -0.26988646, -0.7423424],
[0.10324868, -0.45013508, -0.58280784],
]
])
tf.debugging.assert_near(output[:, :3, :3] , _UpperCAmelCase , atol=1e-4) | 8 | 1 |