code
stringlengths 86
54.5k
| code_codestyle
int64 0
371
| style_context
stringlengths 87
49.2k
| style_context_codestyle
int64 0
349
| label
int64 0
1
|
---|---|---|---|---|
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
__UpperCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(_snake_case )
class lowerCamelCase (_snake_case ):
'''simple docstring'''
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> List[Any]:
super().__init__(*_UpperCamelCase , **_UpperCamelCase )
requires_backends(self , 'decord' )
self.check_model_type(_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None ) -> Dict:
UpperCAmelCase_ : Optional[Any] = {}
if frame_sampling_rate is not None:
UpperCAmelCase_ : Optional[Any] = frame_sampling_rate
if num_frames is not None:
UpperCAmelCase_ : Optional[Any] = num_frames
UpperCAmelCase_ : Optional[Any] = {}
if top_k is not None:
UpperCAmelCase_ : List[Any] = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , _UpperCamelCase , **_UpperCamelCase ) -> Union[str, Any]:
return super().__call__(_UpperCamelCase , **_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=1 ) -> Optional[int]:
if num_frames is None:
UpperCAmelCase_ : Optional[Any] = self.model.config.num_frames
if video.startswith('http://' ) or video.startswith('https://' ):
UpperCAmelCase_ : List[str] = BytesIO(requests.get(_UpperCamelCase ).content )
UpperCAmelCase_ : Optional[Any] = VideoReader(_UpperCamelCase )
videoreader.seek(0 )
UpperCAmelCase_ : List[Any] = 0
UpperCAmelCase_ : Union[str, Any] = num_frames * frame_sampling_rate - 1
UpperCAmelCase_ : Optional[int] = np.linspace(_UpperCamelCase , _UpperCamelCase , num=_UpperCamelCase , dtype=np.intaa )
UpperCAmelCase_ : List[Any] = videoreader.get_batch(_UpperCamelCase ).asnumpy()
UpperCAmelCase_ : Union[str, Any] = list(_UpperCamelCase )
UpperCAmelCase_ : List[Any] = self.image_processor(_UpperCamelCase , return_tensors=self.framework )
return model_inputs
def __UpperCAmelCase ( self , _UpperCamelCase ) -> List[Any]:
UpperCAmelCase_ : Dict = self.model(**_UpperCamelCase )
return model_outputs
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=5 ) -> List[str]:
if top_k > self.model.config.num_labels:
UpperCAmelCase_ : List[Any] = self.model.config.num_labels
if self.framework == "pt":
UpperCAmelCase_ : Dict = model_outputs.logits.softmax(-1 )[0]
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = probs.topk(_UpperCamelCase )
else:
raise ValueError(f"Unsupported framework: {self.framework}" )
UpperCAmelCase_ : str = scores.tolist()
UpperCAmelCase_ : Any = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(_UpperCamelCase , _UpperCamelCase )]
| 29 |
import os
def UpperCAmelCase__ ( _A : Any ):
'''simple docstring'''
a__ =len(grid[0] )
a__ =len(_A )
a__ =0
a__ =0
a__ =0
# Check vertically, horizontally, diagonally at the same time (only works
# for nxn grid)
for i in range(_A ):
for j in range(n_rows - 3 ):
a__ =grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
a__ =grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
# Left-to-right diagonal (\) product
if i < n_columns - 3:
a__ =(
grid[i][j]
* grid[i + 1][j + 1]
* grid[i + 2][j + 2]
* grid[i + 3][j + 3]
)
# Right-to-left diagonal(/) product
if i > 2:
a__ =(
grid[i][j]
* grid[i - 1][j + 1]
* grid[i - 2][j + 2]
* grid[i - 3][j + 3]
)
a__ =max(
_A , _A , _A , _A )
if max_product > largest:
a__ =max_product
return largest
def UpperCAmelCase__ ( ):
'''simple docstring'''
a__ =[]
with open(os.path.dirname(_A ) + '''/grid.txt''' ) as file:
for line in file:
grid.append(line.strip('''\n''' ).split(''' ''' ) )
a__ =[[int(_A ) for i in grid[j]] for j in range(len(_A ) )]
return largest_product(_A )
if __name__ == "__main__":
print(solution())
| 188 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__A =logging.get_logger(__name__)
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__=False ):
UpperCAmelCase__ : str = []
# fmt: off
# stem:
rename_keys.append(("""cls_token""", """vit.embeddings.cls_token""") )
rename_keys.append(("""pos_embed""", """vit.embeddings.position_embeddings""") )
rename_keys.append(("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias""") )
# backbone
rename_keys.append(("""patch_embed.backbone.stem.conv.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.bias""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias""") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
UpperCAmelCase__ : Dict = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
# fmt: on
return rename_keys
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=False ):
for i in range(config.num_hidden_layers ):
if base_model:
UpperCAmelCase__ : Union[str, Any] = """"""
else:
UpperCAmelCase__ : Union[str, Any] = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCAmelCase__ : str = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
UpperCAmelCase__ : Optional[Any] = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase__ : Tuple = in_proj_weight[
: config.hidden_size, :
]
UpperCAmelCase__ : Tuple = in_proj_bias[: config.hidden_size]
UpperCAmelCase__ : Optional[int] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase__ : str = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCAmelCase__ : str = in_proj_weight[
-config.hidden_size :, :
]
UpperCAmelCase__ : Optional[int] = in_proj_bias[-config.hidden_size :]
def _UpperCamelCase ( UpperCamelCase__ ):
UpperCAmelCase__ : Tuple = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(snake_case__ , snake_case__ )
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase__ : Tuple = dct.pop(snake_case__ )
UpperCAmelCase__ : Any = val
def _UpperCamelCase ( ):
UpperCAmelCase__ : int = """http://images.cocodataset.org/val2017/000000039769.jpg"""
UpperCAmelCase__ : Union[str, Any] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
return im
@torch.no_grad()
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=False ):
UpperCAmelCase__ : int = BitConfig(
global_padding="""same""" , layer_type="""bottleneck""" , depths=(3, 4, 9) , out_features=["""stage3"""] , embedding_dynamic_padding=snake_case__ , )
UpperCAmelCase__ : Optional[int] = ViTHybridConfig(backbone_config=snake_case__ , image_size=3_8_4 , num_labels=1_0_0_0 )
UpperCAmelCase__ : Union[str, Any] = False
# load original model from timm
UpperCAmelCase__ : Optional[Any] = timm.create_model(snake_case__ , pretrained=snake_case__ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
UpperCAmelCase__ : str = timm_model.state_dict()
if base_model:
remove_classification_head_(snake_case__ )
UpperCAmelCase__ : List[Any] = create_rename_keys(snake_case__ , snake_case__ )
for src, dest in rename_keys:
rename_key(snake_case__ , snake_case__ , snake_case__ )
read_in_q_k_v(snake_case__ , snake_case__ , snake_case__ )
UpperCAmelCase__ : int = """huggingface/label-files"""
UpperCAmelCase__ : Any = """imagenet-1k-id2label.json"""
UpperCAmelCase__ : int = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type="""dataset""" ) , """r""" ) )
UpperCAmelCase__ : Optional[Any] = {int(snake_case__ ): v for k, v in idalabel.items()}
UpperCAmelCase__ : List[Any] = idalabel
UpperCAmelCase__ : str = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
UpperCAmelCase__ : List[str] = ViTHybridModel(snake_case__ ).eval()
else:
UpperCAmelCase__ : Union[str, Any] = ViTHybridForImageClassification(snake_case__ ).eval()
model.load_state_dict(snake_case__ )
# create image processor
UpperCAmelCase__ : List[str] = create_transform(**resolve_data_config({} , model=snake_case__ ) )
UpperCAmelCase__ : Tuple = transform.transforms
UpperCAmelCase__ : List[Any] = {
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
UpperCAmelCase__ : Optional[int] = ViTHybridImageProcessor(
do_resize=snake_case__ , size={"""shortest_edge""": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=snake_case__ , crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} , do_normalize=snake_case__ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
UpperCAmelCase__ : Tuple = prepare_img()
UpperCAmelCase__ : int = transform(snake_case__ ).unsqueeze(0 )
UpperCAmelCase__ : Dict = processor(snake_case__ , return_tensors="""pt""" ).pixel_values
# verify pixel values
assert torch.allclose(snake_case__ , snake_case__ )
# verify logits
with torch.no_grad():
UpperCAmelCase__ : List[Any] = model(snake_case__ )
UpperCAmelCase__ : Optional[int] = outputs.logits
print("""Predicted class:""" , logits.argmax(-1 ).item() )
if base_model:
UpperCAmelCase__ : List[str] = timm_model.forward_features(snake_case__ )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(snake_case__ , outputs.pooler_output , atol=1e-3 )
else:
UpperCAmelCase__ : Union[str, Any] = timm_model(snake_case__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(snake_case__ , outputs.logits , atol=1e-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
print(f'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case__ )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(snake_case__ )
if push_to_hub:
print(f'''Pushing model and processor to the hub {vit_name}''' )
model.push_to_hub(f'''ybelkada/{vit_name}''' )
processor.push_to_hub(f'''ybelkada/{vit_name}''' )
if __name__ == "__main__":
__A =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_r50_s16_384',
type=str,
help='Name of the hybrid ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
__A =parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub) | 365 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class _snake_case :
lowerCAmelCase :str = PegasusConfig
lowerCAmelCase :Optional[int] = {}
lowerCAmelCase :Dict = '''gelu'''
def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=7 , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase=99 , _lowerCamelCase=32 , _lowerCamelCase=2 , _lowerCamelCase=4 , _lowerCamelCase=37 , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=40 , _lowerCamelCase=2 , _lowerCamelCase=1 , _lowerCamelCase=0 , ):
UpperCAmelCase__ : Union[str, Any] = parent
UpperCAmelCase__ : Any = batch_size
UpperCAmelCase__ : Union[str, Any] = seq_length
UpperCAmelCase__ : str = is_training
UpperCAmelCase__ : Dict = use_labels
UpperCAmelCase__ : Optional[int] = vocab_size
UpperCAmelCase__ : int = hidden_size
UpperCAmelCase__ : Union[str, Any] = num_hidden_layers
UpperCAmelCase__ : str = num_attention_heads
UpperCAmelCase__ : Any = intermediate_size
UpperCAmelCase__ : int = hidden_dropout_prob
UpperCAmelCase__ : Optional[Any] = attention_probs_dropout_prob
UpperCAmelCase__ : List[str] = max_position_embeddings
UpperCAmelCase__ : Union[str, Any] = eos_token_id
UpperCAmelCase__ : Any = pad_token_id
UpperCAmelCase__ : Dict = bos_token_id
def snake_case__ ( self):
UpperCAmelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size)
UpperCAmelCase__ : str = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) , 1)
UpperCAmelCase__ : Union[str, Any] = tf.concat([input_ids, eos_tensor] , axis=1)
UpperCAmelCase__ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
UpperCAmelCase__ : Any = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
UpperCAmelCase__ : Union[str, Any] = prepare_pegasus_inputs_dict(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
return config, inputs_dict
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase):
UpperCAmelCase__ : Optional[int] = TFPegasusModel(config=_lowerCamelCase).get_decoder()
UpperCAmelCase__ : Optional[int] = inputs_dict["""input_ids"""]
UpperCAmelCase__ : Any = input_ids[:1, :]
UpperCAmelCase__ : List[str] = inputs_dict["""attention_mask"""][:1, :]
UpperCAmelCase__ : int = inputs_dict["""head_mask"""]
UpperCAmelCase__ : int = 1
# first forward pass
UpperCAmelCase__ : int = model(_lowerCamelCase , attention_mask=_lowerCamelCase , head_mask=_lowerCamelCase , use_cache=_lowerCamelCase)
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
UpperCAmelCase__ : int = ids_tensor((self.batch_size, 3) , config.vocab_size)
UpperCAmelCase__ : int = tf.cast(ids_tensor((self.batch_size, 3) , 2) , tf.inta)
# append to next input_ids and
UpperCAmelCase__ : Tuple = tf.concat([input_ids, next_tokens] , axis=-1)
UpperCAmelCase__ : str = tf.concat([attention_mask, next_attn_mask] , axis=-1)
UpperCAmelCase__ : Union[str, Any] = model(_lowerCamelCase , attention_mask=_lowerCamelCase)[0]
UpperCAmelCase__ : Any = model(_lowerCamelCase , attention_mask=_lowerCamelCase , past_key_values=_lowerCamelCase)[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1])
# select random slice
UpperCAmelCase__ : Optional[Any] = int(ids_tensor((1,) , output_from_past.shape[-1]))
UpperCAmelCase__ : int = output_from_no_past[:, -3:, random_slice_idx]
UpperCAmelCase__ : List[str] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_lowerCamelCase , _lowerCamelCase , rtol=1e-3)
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , ):
if attention_mask is None:
UpperCAmelCase__ : Any = tf.cast(tf.math.not_equal(UpperCamelCase__ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
UpperCAmelCase__ : Optional[int] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
UpperCAmelCase__ : Tuple = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCAmelCase__ : Dict = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
UpperCAmelCase__ : Tuple = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class _snake_case ( a__ , a__ , unittest.TestCase ):
lowerCAmelCase :str = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
lowerCAmelCase :Any = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
lowerCAmelCase :List[str] = (
{
'''conversational''': TFPegasusForConditionalGeneration,
'''feature-extraction''': TFPegasusModel,
'''summarization''': TFPegasusForConditionalGeneration,
'''text2text-generation''': TFPegasusForConditionalGeneration,
'''translation''': TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
lowerCAmelCase :int = True
lowerCAmelCase :Optional[int] = False
lowerCAmelCase :Dict = False
def snake_case__ ( self):
UpperCAmelCase__ : List[str] = TFPegasusModelTester(self)
UpperCAmelCase__ : Dict = ConfigTester(self , config_class=_lowerCamelCase)
def snake_case__ ( self):
self.config_tester.run_common_tests()
def snake_case__ ( self):
UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_lowerCamelCase)
@require_sentencepiece
@require_tokenizers
@require_tf
class _snake_case ( unittest.TestCase ):
lowerCAmelCase :Dict = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
lowerCAmelCase :Any = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
lowerCAmelCase :Tuple = '''google/pegasus-xsum'''
@cached_property
def snake_case__ ( self):
return AutoTokenizer.from_pretrained(self.model_name)
@cached_property
def snake_case__ ( self):
UpperCAmelCase__ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
return model
def snake_case__ ( self , **_lowerCamelCase):
UpperCAmelCase__ : Dict = self.translate_src_text(**_lowerCamelCase)
assert self.expected_text == generated_words
def snake_case__ ( self , **_lowerCamelCase):
UpperCAmelCase__ : List[Any] = self.tokenizer(self.src_text , **_lowerCamelCase , padding=_lowerCamelCase , return_tensors="""tf""")
UpperCAmelCase__ : Dict = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=_lowerCamelCase , )
UpperCAmelCase__ : Optional[Any] = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=_lowerCamelCase)
return generated_words
@slow
def snake_case__ ( self):
self._assert_generated_batch_equal_expected() | 283 | 0 |
'''simple docstring'''
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
_UpperCamelCase = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
class lowerCamelCase_ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
"""simple docstring"""
a_ =BartphoTokenizer
a_ =False
a_ =True
def _lowercase ( self : Optional[int] ) -> Any:
super().setUp()
__lowerCamelCase : List[str] = ['▁This', '▁is', '▁a', '▁t', 'est']
__lowerCamelCase : str = dict(zip(_a , range(len(_a ) ) ) )
__lowerCamelCase : List[str] = {'unk_token': '<unk>'}
__lowerCamelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['monolingual_vocab_file'] )
with open(self.monolingual_vocab_file , 'w' , encoding='utf-8' ) as fp:
for token in vocab_tokens:
fp.write(f'{token} {vocab_tokens[token]}\n' )
__lowerCamelCase : List[str] = BartphoTokenizer(_a , self.monolingual_vocab_file , **self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase ( self : str , **_a : Union[str, Any] ) -> Optional[int]:
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname , **_a )
def _lowercase ( self : Optional[int] , _a : str ) -> Tuple:
__lowerCamelCase : List[Any] = 'This is a là test'
__lowerCamelCase : Tuple = 'This is a<unk><unk> test'
return input_text, output_text
def _lowercase ( self : Any ) -> Tuple:
__lowerCamelCase : Any = BartphoTokenizer(_a , self.monolingual_vocab_file , **self.special_tokens_map )
__lowerCamelCase : List[Any] = 'This is a là test'
__lowerCamelCase : Optional[Any] = '▁This ▁is ▁a ▁l à ▁t est'.split()
__lowerCamelCase : Optional[Any] = tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
__lowerCamelCase : str = tokens + [tokenizer.unk_token]
__lowerCamelCase : List[str] = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , _a )
| 208 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'
),
}
class lowerCamelCase_ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
a_ ="""xlm-roberta"""
def __init__( self : Any , _a : Optional[Any]=3_0522 , _a : Optional[Any]=768 , _a : int=12 , _a : Tuple=12 , _a : str=3072 , _a : List[Any]="gelu" , _a : int=0.1 , _a : Optional[int]=0.1 , _a : Optional[int]=512 , _a : List[Any]=2 , _a : Optional[Any]=0.02 , _a : str=1e-12 , _a : str=1 , _a : str=0 , _a : Optional[Any]=2 , _a : Optional[int]="absolute" , _a : int=True , _a : Tuple=None , **_a : Any , ) -> Dict:
super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a )
__lowerCamelCase : int = vocab_size
__lowerCamelCase : Union[str, Any] = hidden_size
__lowerCamelCase : int = num_hidden_layers
__lowerCamelCase : Optional[int] = num_attention_heads
__lowerCamelCase : List[str] = hidden_act
__lowerCamelCase : Any = intermediate_size
__lowerCamelCase : int = hidden_dropout_prob
__lowerCamelCase : List[Any] = attention_probs_dropout_prob
__lowerCamelCase : Optional[int] = max_position_embeddings
__lowerCamelCase : Tuple = type_vocab_size
__lowerCamelCase : Optional[int] = initializer_range
__lowerCamelCase : Optional[int] = layer_norm_eps
__lowerCamelCase : str = position_embedding_type
__lowerCamelCase : List[Any] = use_cache
__lowerCamelCase : int = classifier_dropout
class lowerCamelCase_ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
@property
def _lowercase ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
__lowerCamelCase : int = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__lowerCamelCase : Any = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 208 | 1 |
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
__lowerCAmelCase = Lock()
def UpperCAmelCase_ (__a : Union[str, Any] , __a : Optional[Any] , __a : Optional[int] , __a : Optional[int] , __a : List[Any] , __a : Union[str, Any] , __a : Union[str, Any] ):
"""simple docstring"""
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 1_0 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(__a )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
_a : List[Any] = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
_a : List[str] = min(__a , __a )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(__a )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
_a : Dict = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
_a : Union[str, Any] = max(__a , __a )
# after all swaps are performed, send the values back to main
result_pipe[1].send(__a )
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
_a : int = []
_a : List[Any] = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
_a : Dict = Pipe()
_a : Optional[Any] = Pipe()
process_array_.append(
Process(
target=__a , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
_a : List[Any] = temp_rs
_a : Optional[int] = temp_rr
for i in range(1 , len(__a ) - 1 ):
_a : int = Pipe()
_a : List[str] = Pipe()
process_array_.append(
Process(
target=__a , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
_a : List[str] = temp_rs
_a : str = temp_rr
process_array_.append(
Process(
target=__a , args=(
len(__a ) - 1,
arr[len(__a ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(__a ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(__a ) ):
_a : List[str] = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Optional[int] = list(range(1_0 , 0 , -1 ) )
print('Initial List' )
print(*__a )
_a : Tuple = odd_even_transposition(__a )
print('Sorted List\n' )
print(*__a )
if __name__ == "__main__":
main()
| 5 |
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def UpperCAmelCase_ (__a : str = "https://www.worldometers.info/coronavirus" ):
"""simple docstring"""
_a : List[str] = BeautifulSoup(requests.get(__a ).text , 'html.parser' )
_a : Dict = soup.findAll('h1' )
_a : Union[str, Any] = soup.findAll('div' , {'class': 'maincounter-number'} )
keys += soup.findAll('span' , {'class': 'panel-title'} )
values += soup.findAll('div' , {'class': 'number-table-main'} )
return {key.text.strip(): value.text.strip() for key, value in zip(__a , __a )}
if __name__ == "__main__":
print("""\033[1m""" + """COVID-19 Status of the World""" + """\033[0m\n""")
for key, value in world_covidaa_stats().items():
print(f'''{key}\n{value}\n''')
| 5 | 1 |
"""simple docstring"""
from __future__ import annotations
from math import gcd
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ = 2 , UpperCamelCase_ = 1 , UpperCamelCase_ = 3 , ):
# A value less than 2 can cause an infinite loop in the algorithm.
if num < 2:
raise ValueError("""The input value cannot be less than 2""" )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
def rand_fn(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> int:
return (pow(UpperCamelCase_ , 2 ) + step) % modulus
for _ in range(UpperCamelCase_ ):
# These track the position within the cycle detection logic.
__SCREAMING_SNAKE_CASE = seed
__SCREAMING_SNAKE_CASE = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
__SCREAMING_SNAKE_CASE = rand_fn(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = rand_fn(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = rand_fn(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
__SCREAMING_SNAKE_CASE = gcd(hare - tortoise , UpperCamelCase_ )
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
__SCREAMING_SNAKE_CASE = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
__magic_name__ = argparse.ArgumentParser()
parser.add_argument(
"num",
type=int,
help="The value to find a divisor of",
)
parser.add_argument(
"--attempts",
type=int,
default=3,
help="The number of attempts before giving up",
)
__magic_name__ = parser.parse_args()
__magic_name__ = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F"""{args.num} is probably prime""")
else:
__magic_name__ = args.num // divisor
print(F"""{args.num} = {divisor} * {quotient}""")
| 100 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class a ( _lowerCamelCase ):
snake_case_ = 42
@flax_register_to_config
class a ( nn.Module , _lowerCamelCase , _lowerCamelCase ):
snake_case_ = 32
snake_case_ = 4
snake_case_ = 4
snake_case_ = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
snake_case_ = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
snake_case_ = False
snake_case_ = (320, 640, 1_280, 1_280)
snake_case_ = 2
snake_case_ = 8
snake_case_ = None
snake_case_ = 1_280
snake_case_ = 0.0
snake_case_ = False
snake_case_ = jnp.floataa
snake_case_ = True
snake_case_ = 0
snake_case_ = False
def A_ ( self : Optional[int] , lowercase_ : jax.random.KeyArray ):
# init input tensors
snake_case_ = (1, self.in_channels, self.sample_size, self.sample_size)
snake_case_ = jnp.zeros(lowercase_ , dtype=jnp.floataa )
snake_case_ = jnp.ones((1,) , dtype=jnp.intaa )
snake_case_ = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
snake_case_ ,snake_case_ = jax.random.split(lowercase_ )
snake_case_ = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(lowercase_ , lowercase_ , lowercase_ , lowercase_ )["params"]
def A_ ( self : List[str] ):
snake_case_ = self.block_out_channels
snake_case_ = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
'''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
snake_case_ = self.num_attention_heads or self.attention_head_dim
# input
snake_case_ = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
snake_case_ = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
snake_case_ = FlaxTimestepEmbedding(lowercase_ , dtype=self.dtype )
snake_case_ = self.only_cross_attention
if isinstance(lowercase_ , lowercase_ ):
snake_case_ = (only_cross_attention,) * len(self.down_block_types )
if isinstance(lowercase_ , lowercase_ ):
snake_case_ = (num_attention_heads,) * len(self.down_block_types )
# down
snake_case_ = []
snake_case_ = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
snake_case_ = output_channel
snake_case_ = block_out_channels[i]
snake_case_ = i == len(lowercase_ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
snake_case_ = FlaxCrossAttnDownBlockaD(
in_channels=lowercase_ , out_channels=lowercase_ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
snake_case_ = FlaxDownBlockaD(
in_channels=lowercase_ , out_channels=lowercase_ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(lowercase_ )
snake_case_ = down_blocks
# mid
snake_case_ = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
snake_case_ = []
snake_case_ = list(reversed(lowercase_ ) )
snake_case_ = list(reversed(lowercase_ ) )
snake_case_ = list(reversed(lowercase_ ) )
snake_case_ = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
snake_case_ = output_channel
snake_case_ = reversed_block_out_channels[i]
snake_case_ = reversed_block_out_channels[min(i + 1 , len(lowercase_ ) - 1 )]
snake_case_ = i == len(lowercase_ ) - 1
if up_block_type == "CrossAttnUpBlock2D":
snake_case_ = FlaxCrossAttnUpBlockaD(
in_channels=lowercase_ , out_channels=lowercase_ , prev_output_channel=lowercase_ , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
snake_case_ = FlaxUpBlockaD(
in_channels=lowercase_ , out_channels=lowercase_ , prev_output_channel=lowercase_ , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(lowercase_ )
snake_case_ = output_channel
snake_case_ = up_blocks
# out
snake_case_ = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
snake_case_ = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : int , lowercase_ : Any , lowercase_ : int=None , lowercase_ : Any=None , lowercase_ : bool = True , lowercase_ : bool = False , ):
# 1. time
if not isinstance(lowercase_ , jnp.ndarray ):
snake_case_ = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(lowercase_ , jnp.ndarray ) and len(timesteps.shape ) == 0:
snake_case_ = timesteps.astype(dtype=jnp.floataa )
snake_case_ = jnp.expand_dims(lowercase_ , 0 )
snake_case_ = self.time_proj(lowercase_ )
snake_case_ = self.time_embedding(lowercase_ )
# 2. pre-process
snake_case_ = jnp.transpose(lowercase_ , (0, 2, 3, 1) )
snake_case_ = self.conv_in(lowercase_ )
# 3. down
snake_case_ = (sample,)
for down_block in self.down_blocks:
if isinstance(lowercase_ , lowercase_ ):
snake_case_ ,snake_case_ = down_block(lowercase_ , lowercase_ , lowercase_ , deterministic=not train )
else:
snake_case_ ,snake_case_ = down_block(lowercase_ , lowercase_ , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
snake_case_ = ()
for down_block_res_sample, down_block_additional_residual in zip(
lowercase_ , lowercase_ ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
snake_case_ = new_down_block_res_samples
# 4. mid
snake_case_ = self.mid_block(lowercase_ , lowercase_ , lowercase_ , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
snake_case_ = down_block_res_samples[-(self.layers_per_block + 1) :]
snake_case_ = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(lowercase_ , lowercase_ ):
snake_case_ = up_block(
lowercase_ , temb=lowercase_ , encoder_hidden_states=lowercase_ , res_hidden_states_tuple=lowercase_ , deterministic=not train , )
else:
snake_case_ = up_block(lowercase_ , temb=lowercase_ , res_hidden_states_tuple=lowercase_ , deterministic=not train )
# 6. post-process
snake_case_ = self.conv_norm_out(lowercase_ )
snake_case_ = nn.silu(lowercase_ )
snake_case_ = self.conv_out(lowercase_ )
snake_case_ = jnp.transpose(lowercase_ , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=lowercase_ )
| 56 | 0 |
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def _UpperCamelCase (a__ :str = "laptop" ):
"""simple docstring"""
UpperCamelCase__ = f"""https://www.amazon.in/laptop/s?k={product}"""
UpperCamelCase__ = {
"""User-Agent""": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
(KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
"""Accept-Language""": """en-US, en;q=0.5""",
}
UpperCamelCase__ = BeautifulSoup(requests.get(a__ , headers=a__ ).text )
# Initialize a Pandas dataframe with the column titles
UpperCamelCase__ = DataFrame(
columns=[
"""Product Title""",
"""Product Link""",
"""Current Price of the product""",
"""Product Rating""",
"""MRP of the product""",
"""Discount""",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"""div""" , attrs={"""class""": """s-result-item""", """data-component-type""": """s-search-result"""} , ) , soup.find_all("""div""" , attrs={"""class""": """a-row a-size-base a-color-base"""} ) , ):
try:
UpperCamelCase__ = item.ha.text
UpperCamelCase__ = """https://www.amazon.in/""" + item.ha.a["""href"""]
UpperCamelCase__ = item.find("""span""" , attrs={"""class""": """a-offscreen"""} ).text
try:
UpperCamelCase__ = item.find("""span""" , attrs={"""class""": """a-icon-alt"""} ).text
except AttributeError:
UpperCamelCase__ = """Not available"""
try:
UpperCamelCase__ = (
"""₹"""
+ item.find(
"""span""" , attrs={"""class""": """a-price a-text-price"""} ).text.split("""₹""" )[1]
)
except AttributeError:
UpperCamelCase__ = """"""
try:
UpperCamelCase__ = float(
(
(
float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
- float(product_price.strip("""₹""" ).replace(""",""" , """""" ) )
)
/ float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
)
* 100 )
except ValueError:
UpperCamelCase__ = float("""nan""" )
except AttributeError:
pass
UpperCamelCase__ = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
UpperCamelCase__ = """ """
UpperCamelCase__ = """ """
data_frame.index += 1
return data_frame
if __name__ == "__main__":
UpperCamelCase__ = "headphones"
get_amazon_product_data(product).to_csv(f"""Amazon Product Data for {product}.csv""")
| 87 |
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)
def _UpperCamelCase (a__ :int ):
"""simple docstring"""
if hor == 128:
UpperCamelCase__ = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
UpperCamelCase__ = (32, 128, 256)
UpperCamelCase__ = ("""UpResnetBlock1D""", """UpResnetBlock1D""")
elif hor == 32:
UpperCamelCase__ = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
UpperCamelCase__ = (32, 64, 128, 256)
UpperCamelCase__ = ("""UpResnetBlock1D""", """UpResnetBlock1D""", """UpResnetBlock1D""")
UpperCamelCase__ = torch.load(f"""/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch""" )
UpperCamelCase__ = model.state_dict()
UpperCamelCase__ = {
"""down_block_types""": down_block_types,
"""block_out_channels""": block_out_channels,
"""up_block_types""": up_block_types,
"""layers_per_block""": 1,
"""use_timestep_embedding""": True,
"""out_block_type""": """OutConv1DBlock""",
"""norm_num_groups""": 8,
"""downsample_each_block""": False,
"""in_channels""": 14,
"""out_channels""": 14,
"""extra_in_channels""": 0,
"""time_embedding_type""": """positional""",
"""flip_sin_to_cos""": False,
"""freq_shift""": 1,
"""sample_size""": 6_5536,
"""mid_block_type""": """MidResTemporalBlock1D""",
"""act_fn""": """mish""",
}
UpperCamelCase__ = UNetaDModel(**a__ )
print(f"""length of state dict: {len(state_dict.keys() )}""" )
print(f"""length of value function dict: {len(hf_value_function.state_dict().keys() )}""" )
UpperCamelCase__ = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
UpperCamelCase__ = state_dict.pop(a__ )
hf_value_function.load_state_dict(a__ )
torch.save(hf_value_function.state_dict() , f"""hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin""" )
with open(f"""hub/hopper-medium-v2/unet/hor{hor}/config.json""" , """w""" ) as f:
json.dump(a__ , a__ )
def _UpperCamelCase ():
"""simple docstring"""
UpperCamelCase__ = {
"""in_channels""": 14,
"""down_block_types""": ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D"""),
"""up_block_types""": (),
"""out_block_type""": """ValueFunction""",
"""mid_block_type""": """ValueFunctionMidBlock1D""",
"""block_out_channels""": (32, 64, 128, 256),
"""layers_per_block""": 1,
"""downsample_each_block""": True,
"""sample_size""": 6_5536,
"""out_channels""": 14,
"""extra_in_channels""": 0,
"""time_embedding_type""": """positional""",
"""use_timestep_embedding""": True,
"""flip_sin_to_cos""": False,
"""freq_shift""": 1,
"""norm_num_groups""": 8,
"""act_fn""": """mish""",
}
UpperCamelCase__ = torch.load("""/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch""" )
UpperCamelCase__ = model
UpperCamelCase__ = UNetaDModel(**a__ )
print(f"""length of state dict: {len(state_dict.keys() )}""" )
print(f"""length of value function dict: {len(hf_value_function.state_dict().keys() )}""" )
UpperCamelCase__ = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
UpperCamelCase__ = state_dict.pop(a__ )
hf_value_function.load_state_dict(a__ )
torch.save(hf_value_function.state_dict() , """hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin""" )
with open("""hub/hopper-medium-v2/value_function/config.json""" , """w""" ) as f:
json.dump(a__ , a__ )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 87 | 1 |
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : float ):
'''simple docstring'''
if edge <= 0 or not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
raise ValueError("Length must be a positive." )
return 3 * ((2_5 + 1_0 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def a__ ( SCREAMING_SNAKE_CASE : float ):
'''simple docstring'''
if edge <= 0 or not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
raise ValueError("Length must be a positive." )
return ((1_5 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 108 |
"""simple docstring"""
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
def __init__( self , snake_case__=0.01 , snake_case__=1_000 ):
"""simple docstring"""
lowerCAmelCase : List[Any] = p_stop
lowerCAmelCase : Optional[Any] = max_length
def __iter__( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = 0
lowerCAmelCase : Tuple = False
while not stop and count < self.max_length:
yield count
count += 1
lowerCAmelCase : Dict = random.random() < self.p_stop
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__=False , snake_case__=True ):
"""simple docstring"""
lowerCAmelCase : Dict = [
BatchSamplerShard(snake_case__ , 2 , snake_case__ , split_batches=snake_case__ , even_batches=snake_case__ )
for i in range(2 )
]
lowerCAmelCase : Any = [list(snake_case__ ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(snake_case__ ) for shard in batch_sampler_shards] , [len(snake_case__ ) for e in expected] )
self.assertListEqual(snake_case__ , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = BatchSampler(range(24 ) , batch_size=3 , drop_last=snake_case__ )
lowerCAmelCase : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ )
lowerCAmelCase : Tuple = BatchSampler(range(24 ) , batch_size=3 , drop_last=snake_case__ )
# Expected shouldn't change
self.check_batch_sampler_shards(snake_case__ , snake_case__ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
lowerCAmelCase : Union[str, Any] = BatchSampler(range(21 ) , batch_size=3 , drop_last=snake_case__ )
lowerCAmelCase : List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ )
lowerCAmelCase : Tuple = BatchSampler(range(21 ) , batch_size=3 , drop_last=snake_case__ )
lowerCAmelCase : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
lowerCAmelCase : List[str] = BatchSampler(range(22 ) , batch_size=3 , drop_last=snake_case__ )
lowerCAmelCase : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ )
lowerCAmelCase : Dict = BatchSampler(range(22 ) , batch_size=3 , drop_last=snake_case__ )
lowerCAmelCase : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
lowerCAmelCase : Any = BatchSampler(range(20 ) , batch_size=3 , drop_last=snake_case__ )
lowerCAmelCase : Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ )
lowerCAmelCase : List[str] = BatchSampler(range(20 ) , batch_size=3 , drop_last=snake_case__ )
lowerCAmelCase : List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ )
# Check the shards when the dataset is very small.
lowerCAmelCase : Dict = BatchSampler(range(2 ) , batch_size=3 , drop_last=snake_case__ )
lowerCAmelCase : Union[str, Any] = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(snake_case__ , snake_case__ )
lowerCAmelCase : Optional[Any] = BatchSampler(range(2 ) , batch_size=3 , drop_last=snake_case__ )
lowerCAmelCase : Any = [[], []]
self.check_batch_sampler_shards(snake_case__ , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = BatchSampler(range(24 ) , batch_size=4 , drop_last=snake_case__ )
lowerCAmelCase : int = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ )
lowerCAmelCase : Dict = BatchSampler(range(24 ) , batch_size=4 , drop_last=snake_case__ )
# Expected shouldn't change
self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ )
# Check the shards when the dataset is not a round multiple of batch size.
lowerCAmelCase : Optional[int] = BatchSampler(range(22 ) , batch_size=4 , drop_last=snake_case__ )
lowerCAmelCase : Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ )
lowerCAmelCase : List[Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=snake_case__ )
lowerCAmelCase : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
lowerCAmelCase : Tuple = BatchSampler(range(21 ) , batch_size=4 , drop_last=snake_case__ )
lowerCAmelCase : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ )
lowerCAmelCase : Any = BatchSampler(range(21 ) , batch_size=4 , drop_last=snake_case__ )
lowerCAmelCase : Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ )
# Check the shards when the dataset is very small.
lowerCAmelCase : Optional[Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=snake_case__ )
lowerCAmelCase : Optional[int] = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ )
lowerCAmelCase : Optional[Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=snake_case__ )
lowerCAmelCase : Optional[int] = [[], []]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = BatchSampler(range(24 ) , batch_size=3 , drop_last=snake_case__ )
lowerCAmelCase : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , even_batches=snake_case__ )
lowerCAmelCase : Optional[Any] = BatchSampler(range(24 ) , batch_size=3 , drop_last=snake_case__ )
# Expected shouldn't change
self.check_batch_sampler_shards(snake_case__ , snake_case__ , even_batches=snake_case__ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
lowerCAmelCase : Dict = BatchSampler(range(21 ) , batch_size=3 , drop_last=snake_case__ )
lowerCAmelCase : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , even_batches=snake_case__ )
lowerCAmelCase : int = BatchSampler(range(21 ) , batch_size=3 , drop_last=snake_case__ )
lowerCAmelCase : List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , even_batches=snake_case__ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
lowerCAmelCase : str = BatchSampler(range(22 ) , batch_size=3 , drop_last=snake_case__ )
lowerCAmelCase : Union[str, Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , even_batches=snake_case__ )
lowerCAmelCase : List[str] = BatchSampler(range(22 ) , batch_size=3 , drop_last=snake_case__ )
lowerCAmelCase : Union[str, Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , even_batches=snake_case__ )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
lowerCAmelCase : str = BatchSampler(range(20 ) , batch_size=3 , drop_last=snake_case__ )
lowerCAmelCase : Union[str, Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , even_batches=snake_case__ )
lowerCAmelCase : Any = BatchSampler(range(20 ) , batch_size=3 , drop_last=snake_case__ )
lowerCAmelCase : Union[str, Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , even_batches=snake_case__ )
# Check the shards when the dataset is very small.
lowerCAmelCase : Optional[int] = BatchSampler(range(2 ) , batch_size=3 , drop_last=snake_case__ )
lowerCAmelCase : List[str] = [[[0, 1]], []]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , even_batches=snake_case__ )
lowerCAmelCase : str = BatchSampler(range(2 ) , batch_size=3 , drop_last=snake_case__ )
lowerCAmelCase : Optional[Any] = [[], []]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , even_batches=snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = BatchSampler(range(24 ) , batch_size=4 , drop_last=snake_case__ )
lowerCAmelCase : Dict = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ , even_batches=snake_case__ )
lowerCAmelCase : List[Any] = BatchSampler(range(24 ) , batch_size=4 , drop_last=snake_case__ )
# Expected shouldn't change
self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ , even_batches=snake_case__ )
# Check the shards when the dataset is not a round multiple of batch size.
lowerCAmelCase : Tuple = BatchSampler(range(22 ) , batch_size=4 , drop_last=snake_case__ )
lowerCAmelCase : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ , even_batches=snake_case__ )
lowerCAmelCase : int = BatchSampler(range(22 ) , batch_size=4 , drop_last=snake_case__ )
lowerCAmelCase : str = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ , even_batches=snake_case__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
lowerCAmelCase : int = BatchSampler(range(21 ) , batch_size=4 , drop_last=snake_case__ )
lowerCAmelCase : str = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ , even_batches=snake_case__ )
lowerCAmelCase : str = BatchSampler(range(21 ) , batch_size=4 , drop_last=snake_case__ )
lowerCAmelCase : Union[str, Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ , even_batches=snake_case__ )
# Check the shards when the dataset is very small.
lowerCAmelCase : Optional[Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=snake_case__ )
lowerCAmelCase : int = [[[0, 1]], []]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ , even_batches=snake_case__ )
lowerCAmelCase : Dict = BatchSampler(range(2 ) , batch_size=4 , drop_last=snake_case__ )
lowerCAmelCase : List[str] = [[], []]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ , even_batches=snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
lowerCAmelCase : Tuple = [BatchSamplerShard(snake_case__ , 2 , snake_case__ , even_batches=snake_case__ ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__=False , snake_case__=2 , snake_case__=False ):
"""simple docstring"""
random.seed(snake_case__ )
lowerCAmelCase : List[str] = list(snake_case__ )
lowerCAmelCase : Optional[int] = [
IterableDatasetShard(
snake_case__ , batch_size=snake_case__ , drop_last=snake_case__ , num_processes=snake_case__ , process_index=snake_case__ , split_batches=snake_case__ , )
for i in range(snake_case__ )
]
lowerCAmelCase : str = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(snake_case__ )
iterable_dataset_lists.append(list(snake_case__ ) )
lowerCAmelCase : List[Any] = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shard should have the same length, a round multiple of shard_batch_size
lowerCAmelCase : Tuple = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(snake_case__ ) , len(snake_case__ ) )
self.assertTrue(len(snake_case__ ) % shard_batch_size == 0 )
lowerCAmelCase : List[Any] = []
for idx in range(0 , len(snake_case__ ) , snake_case__ ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(snake_case__ ) < len(snake_case__ ):
reference += reference
self.assertListEqual(snake_case__ , reference[: len(snake_case__ )] )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = 42
lowerCAmelCase : Tuple = RandomIterableDataset()
self.check_iterable_dataset_shards(snake_case__ , snake_case__ , batch_size=4 , drop_last=snake_case__ , split_batches=snake_case__ )
self.check_iterable_dataset_shards(snake_case__ , snake_case__ , batch_size=4 , drop_last=snake_case__ , split_batches=snake_case__ )
self.check_iterable_dataset_shards(snake_case__ , snake_case__ , batch_size=4 , drop_last=snake_case__ , split_batches=snake_case__ )
self.check_iterable_dataset_shards(snake_case__ , snake_case__ , batch_size=4 , drop_last=snake_case__ , split_batches=snake_case__ )
# Edge case with a very small dataset
lowerCAmelCase : List[str] = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(snake_case__ , snake_case__ , batch_size=4 , drop_last=snake_case__ , split_batches=snake_case__ )
self.check_iterable_dataset_shards(snake_case__ , snake_case__ , batch_size=4 , drop_last=snake_case__ , split_batches=snake_case__ )
self.check_iterable_dataset_shards(snake_case__ , snake_case__ , batch_size=4 , drop_last=snake_case__ , split_batches=snake_case__ )
self.check_iterable_dataset_shards(snake_case__ , snake_case__ , batch_size=4 , drop_last=snake_case__ , split_batches=snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = BatchSampler(range(16 ) , batch_size=4 , drop_last=snake_case__ )
lowerCAmelCase : List[Any] = SkipBatchSampler(snake_case__ , 2 )
self.assertListEqual(list(snake_case__ ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = DataLoader(list(range(16 ) ) , batch_size=4 )
lowerCAmelCase : Optional[int] = skip_first_batches(snake_case__ , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
for idx, _ in enumerate(snake_case__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(snake_case__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def lowercase__ ( self ):
"""simple docstring"""
Accelerator()
lowerCAmelCase : Dict = DataLoaderDispatcher(range(16 ) , batch_size=4 )
for idx, _ in enumerate(snake_case__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(snake_case__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
| 108 | 1 |
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}
class lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase = 'autoformer'
__lowerCamelCase = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self :Dict , _lowercase :Optional[int] = None , _lowercase :Optional[int] = None , _lowercase :str = "student_t" , _lowercase :str = "nll" , _lowercase :int = 1 , _lowercase :List[int] = [1, 2, 3, 4, 5, 6, 7] , _lowercase :bool = True , _lowercase :int = 0 , _lowercase :int = 0 , _lowercase :int = 0 , _lowercase :int = 0 , _lowercase :Optional[List[int]] = None , _lowercase :Optional[List[int]] = None , _lowercase :int = 64 , _lowercase :int = 2 , _lowercase :int = 2 , _lowercase :int = 2 , _lowercase :int = 2 , _lowercase :int = 32 , _lowercase :int = 32 , _lowercase :str = "gelu" , _lowercase :float = 0.1 , _lowercase :float = 0.1 , _lowercase :float = 0.1 , _lowercase :float = 0.1 , _lowercase :float = 0.1 , _lowercase :int = 1_00 , _lowercase :float = 0.02 , _lowercase :bool = True , _lowercase :List[Any]=True , _lowercase :int = 10 , _lowercase :int = 25 , _lowercase :int = 3 , **_lowercase :Optional[int] , ):
'''simple docstring'''
lowercase__ = prediction_length
lowercase__ = context_length if context_length is not None else prediction_length
lowercase__ = distribution_output
lowercase__ = loss
lowercase__ = input_size
lowercase__ = num_time_features
lowercase__ = lags_sequence
lowercase__ = scaling
lowercase__ = num_dynamic_real_features
lowercase__ = num_static_real_features
lowercase__ = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(a_ ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
lowercase__ = cardinality
else:
lowercase__ = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(a_ ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
lowercase__ = embedding_dimension
else:
lowercase__ = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
lowercase__ = num_parallel_samples
# Transformer architecture configuration
lowercase__ = input_size * len(self.lags_sequence ) + self._number_of_features
lowercase__ = d_model
lowercase__ = encoder_attention_heads
lowercase__ = decoder_attention_heads
lowercase__ = encoder_ffn_dim
lowercase__ = decoder_ffn_dim
lowercase__ = encoder_layers
lowercase__ = decoder_layers
lowercase__ = dropout
lowercase__ = attention_dropout
lowercase__ = activation_dropout
lowercase__ = encoder_layerdrop
lowercase__ = decoder_layerdrop
lowercase__ = activation_function
lowercase__ = init_std
lowercase__ = use_cache
# Autoformer
lowercase__ = label_length
lowercase__ = moving_average
lowercase__ = autocorrelation_factor
super().__init__(is_encoder_decoder=a_ , **a_ )
@property
def UpperCAmelCase ( self :Any ):
'''simple docstring'''
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 359 |
def _A ( __magic_name__ ):
try:
lowercase__ = float(__magic_name__ )
except ValueError:
raise ValueError("Please enter a valid number" )
lowercase__ = decimal - int(__magic_name__ )
if fractional_part == 0:
return int(__magic_name__ ), 1
else:
lowercase__ = len(str(__magic_name__ ).split("." )[1] )
lowercase__ = int(decimal * (10**number_of_frac_digits) )
lowercase__ = 10**number_of_frac_digits
lowercase__ , lowercase__ = denominator, numerator
while True:
lowercase__ = dividend % divisor
if remainder == 0:
break
lowercase__ , lowercase__ = divisor, remainder
lowercase__ , lowercase__ = numerator / divisor, denominator / divisor
return int(__magic_name__ ), int(__magic_name__ )
if __name__ == "__main__":
print(F"""{decimal_to_fraction(2) = }""")
print(F"""{decimal_to_fraction(89.0) = }""")
print(F"""{decimal_to_fraction("67") = }""")
print(F"""{decimal_to_fraction("45.0") = }""")
print(F"""{decimal_to_fraction(1.5) = }""")
print(F"""{decimal_to_fraction("6.25") = }""")
print(F"""{decimal_to_fraction("78td") = }""")
| 201 | 0 |
"""simple docstring"""
UpperCamelCase : List[str] = 9.8_06_65
def A ( snake_case :float , snake_case :float , snake_case :float = g ) -> float:
if fluid_density <= 0:
raise ValueError('Impossible fluid density' )
if volume < 0:
raise ValueError('Impossible Object volume' )
if gravity <= 0:
raise ValueError('Impossible Gravity' )
return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
| 316 |
"""simple docstring"""
def A ( snake_case :int , snake_case :int ) -> bool:
return numa ^ numa < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316 | 1 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
__magic_name__: Tuple = logging.get_logger(__name__)
class snake_case__ ( a_ ):
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> None:
warnings.warn(
"""The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use PoolFormerImageProcessor instead.""" , lowercase_ , )
super().__init__(*lowercase_ , **lowercase_ )
| 364 |
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
__magic_name__: str = logging.getLogger(__name__)
def UpperCamelCase ( ):
"""simple docstring"""
__magic_name__ : int = argparse.ArgumentParser(
description="""Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).""" )
parser.add_argument("""--file_path""", type=_A, default="""data/dump.txt""", help="""The path to the data.""" )
parser.add_argument("""--tokenizer_type""", type=_A, default="""bert""", choices=["""bert""", """roberta""", """gpt2"""] )
parser.add_argument("""--tokenizer_name""", type=_A, default="""bert-base-uncased""", help="""The tokenizer to use.""" )
parser.add_argument("""--dump_file""", type=_A, default="""data/dump""", help="""The dump file prefix.""" )
__magic_name__ : Dict = parser.parse_args()
logger.info(f'Loading Tokenizer ({args.tokenizer_name})' )
if args.tokenizer_type == "bert":
__magic_name__ : Tuple = BertTokenizer.from_pretrained(args.tokenizer_name )
__magic_name__ : List[Any] = tokenizer.special_tokens_map["""cls_token"""] # `[CLS]`
__magic_name__ : Optional[int] = tokenizer.special_tokens_map["""sep_token"""] # `[SEP]`
elif args.tokenizer_type == "roberta":
__magic_name__ : Optional[int] = RobertaTokenizer.from_pretrained(args.tokenizer_name )
__magic_name__ : List[Any] = tokenizer.special_tokens_map["""cls_token"""] # `<s>`
__magic_name__ : Any = tokenizer.special_tokens_map["""sep_token"""] # `</s>`
elif args.tokenizer_type == "gpt2":
__magic_name__ : Any = GPTaTokenizer.from_pretrained(args.tokenizer_name )
__magic_name__ : int = tokenizer.special_tokens_map["""bos_token"""] # `<|endoftext|>`
__magic_name__ : Optional[int] = tokenizer.special_tokens_map["""eos_token"""] # `<|endoftext|>`
logger.info(f'Loading text from {args.file_path}' )
with open(args.file_path, """r""", encoding="""utf8""" ) as fp:
__magic_name__ : Tuple = fp.readlines()
logger.info("""Start encoding""" )
logger.info(f'{len(_A )} examples to process.' )
__magic_name__ : List[Any] = []
__magic_name__ : str = 0
__magic_name__ : str = 10000
__magic_name__ : Dict = time.time()
for text in data:
__magic_name__ : Tuple = f'{bos} {text.strip()} {sep}'
__magic_name__ : Optional[int] = tokenizer.encode(_A, add_special_tokens=_A )
rslt.append(_A )
iter += 1
if iter % interval == 0:
__magic_name__ : Union[str, Any] = time.time()
logger.info(f'{iter} examples processed. - {(end-start):.2f}s/{interval}expl' )
__magic_name__ : Any = time.time()
logger.info("""Finished binarization""" )
logger.info(f'{len(_A )} examples processed.' )
__magic_name__ : Tuple = f'{args.dump_file}.{args.tokenizer_name}.pickle'
__magic_name__ : Tuple = tokenizer.vocab_size
if vocab_size < (1 << 16):
__magic_name__ : Optional[int] = [np.uintaa(_A ) for d in rslt]
else:
__magic_name__ : str = [np.intaa(_A ) for d in rslt]
random.shuffle(rslt_ )
logger.info(f'Dump to {dp_file}' )
with open(_A, """wb""" ) as handle:
pickle.dump(rslt_, _A, protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
| 138 | 0 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class UpperCamelCase__:
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase=13 ,__UpperCAmelCase=7 ,__UpperCAmelCase=True ,__UpperCAmelCase=True ,__UpperCAmelCase=True ,__UpperCAmelCase=True ,__UpperCAmelCase=99 ,__UpperCAmelCase=64 ,__UpperCAmelCase=5 ,__UpperCAmelCase=4 ,__UpperCAmelCase=37 ,__UpperCAmelCase="gelu" ,__UpperCAmelCase=0.1 ,__UpperCAmelCase=0.1 ,__UpperCAmelCase=5_12 ,__UpperCAmelCase=16 ,__UpperCAmelCase=2 ,__UpperCAmelCase=0.0_2 ,__UpperCAmelCase=3 ,__UpperCAmelCase=4 ,__UpperCAmelCase=None ,) -> List[Any]:
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_input_mask
A__ = use_token_type_ids
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = type_sequence_label_size
A__ = initializer_range
A__ = num_labels
A__ = num_choices
A__ = scope
A__ = vocab_size - 1
def snake_case__ ( self ) -> str:
A__ = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A__ = None
if self.use_input_mask:
A__ = random_attention_mask([self.batch_size, self.seq_length] )
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A__ = self.get_config()
return config, input_ids, input_mask, token_labels
def snake_case__ ( self ) -> List[str]:
return GPTNeoXConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=__UpperCAmelCase ,initializer_range=self.initializer_range ,pad_token_id=self.pad_token_id ,)
def snake_case__ ( self ) -> List[str]:
A__ , A__ , A__ , A__ = self.prepare_config_and_inputs()
A__ = True
return config, input_ids, input_mask, token_labels
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Union[str, Any]:
A__ = GPTNeoXModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
A__ = model(__UpperCAmelCase ,attention_mask=__UpperCAmelCase )
A__ = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Optional[Any]:
A__ = True
A__ = GPTNeoXModel(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
A__ = model(__UpperCAmelCase ,attention_mask=__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Dict:
A__ = GPTNeoXForCausalLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
A__ = model(__UpperCAmelCase ,attention_mask=__UpperCAmelCase ,labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Dict:
A__ = self.num_labels
A__ = GPTNeoXForQuestionAnswering(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
A__ = model(__UpperCAmelCase ,attention_mask=__UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> str:
A__ = self.num_labels
A__ = GPTNeoXForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
A__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A__ = model(__UpperCAmelCase ,attention_mask=__UpperCAmelCase ,labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Dict:
A__ = self.num_labels
A__ = GPTNeoXForTokenClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
A__ = model(__UpperCAmelCase ,attention_mask=__UpperCAmelCase ,labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Dict:
A__ = True
A__ = GPTNeoXForCausalLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
# first forward pass
A__ = model(__UpperCAmelCase ,attention_mask=__UpperCAmelCase ,use_cache=__UpperCAmelCase )
A__ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
A__ = ids_tensor((self.batch_size, 3) ,config.vocab_size )
A__ = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
# append to next input_ids and
A__ = torch.cat([input_ids, next_tokens] ,dim=-1 )
A__ = torch.cat([input_mask, next_mask] ,dim=-1 )
A__ = model(__UpperCAmelCase ,attention_mask=__UpperCAmelCase ,output_hidden_states=__UpperCAmelCase )
A__ = output_from_no_past['hidden_states'][0]
A__ = model(
__UpperCAmelCase ,attention_mask=__UpperCAmelCase ,past_key_values=__UpperCAmelCase ,output_hidden_states=__UpperCAmelCase ,)['hidden_states'][0]
# select random slice
A__ = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
A__ = output_from_no_past[:, -3:, random_slice_idx].detach()
A__ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCAmelCase ,__UpperCAmelCase ,atol=1e-3 ) )
def snake_case__ ( self ) -> Dict:
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ , A__ = config_and_inputs
A__ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase__( __A , __A , __A , unittest.TestCase ):
lowerCAmelCase__ : Optional[int] = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ : List[Any] = (GPTNeoXForCausalLM,) if is_torch_available() else ()
lowerCAmelCase__ : List[str] = (
{
'feature-extraction': GPTNeoXModel,
'question-answering': GPTNeoXForQuestionAnswering,
'text-classification': GPTNeoXForSequenceClassification,
'text-generation': GPTNeoXForCausalLM,
'token-classification': GPTNeoXForTokenClassification,
'zero-shot': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ : Union[str, Any] = False
lowerCAmelCase__ : str = False
lowerCAmelCase__ : Any = False
lowerCAmelCase__ : str = False
def snake_case__ ( self ) -> Tuple:
A__ = GPTNeoXModelTester(self )
A__ = ConfigTester(self ,config_class=__UpperCAmelCase ,hidden_size=64 ,num_attention_heads=8 )
def snake_case__ ( self ) -> str:
self.config_tester.run_common_tests()
def snake_case__ ( self ) -> List[str]:
A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase )
def snake_case__ ( self ) -> Dict:
A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase )
def snake_case__ ( self ) -> Optional[int]:
# This regression test was failing with PyTorch < 1.3
A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs_for_decoder()
A__ = None
self.model_tester.create_and_check_model_as_decoder(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase )
def snake_case__ ( self ) -> str:
A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase )
def snake_case__ ( self ) -> Optional[int]:
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*__UpperCAmelCase )
def snake_case__ ( self ) -> List[str]:
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase )
def snake_case__ ( self ) -> Any:
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase )
def snake_case__ ( self ) -> List[Any]:
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase )
@unittest.skip(reason='Feed forward chunking is not implemented' )
def snake_case__ ( self ) -> str:
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def snake_case__ ( self ,__UpperCAmelCase ) -> Tuple:
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = ids_tensor([1, 10] ,config.vocab_size )
A__ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
A__ = GPTNeoXModel(__UpperCAmelCase )
original_model.to(__UpperCAmelCase )
original_model.eval()
A__ = original_model(__UpperCAmelCase ).last_hidden_state
A__ = original_model(__UpperCAmelCase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
A__ = {'type': scaling_type, 'factor': 1_0.0}
A__ = GPTNeoXModel(__UpperCAmelCase )
scaled_model.to(__UpperCAmelCase )
scaled_model.eval()
A__ = scaled_model(__UpperCAmelCase ).last_hidden_state
A__ = scaled_model(__UpperCAmelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__UpperCAmelCase ,__UpperCAmelCase ,atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(__UpperCAmelCase ,__UpperCAmelCase ,atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__UpperCAmelCase ,__UpperCAmelCase ,atol=1e-5 ) )
@require_torch
class UpperCamelCase__( unittest.TestCase ):
@slow
def snake_case__ ( self ) -> int:
A__ = AutoTokenizer.from_pretrained('EleutherAI/pythia-410m-deduped' )
for checkpointing in [True, False]:
A__ = GPTNeoXForCausalLM.from_pretrained('EleutherAI/pythia-410m-deduped' )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(__UpperCAmelCase )
A__ = tokenizer('My favorite food is' ,return_tensors='pt' ).to(__UpperCAmelCase )
# The hub repo. is updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
A__ = 'My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'
A__ = model.generate(**__UpperCAmelCase ,do_sample=__UpperCAmelCase ,max_new_tokens=20 )
A__ = tokenizer.batch_decode(__UpperCAmelCase )[0]
self.assertEqual(__UpperCAmelCase ,__UpperCAmelCase )
| 221 | """simple docstring"""
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
__lowerCamelCase = logging.get_logger(__name__)
@add_end_docstrings(__A )
class UpperCamelCase__( __A ):
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Tuple:
super().__init__(*__UpperCAmelCase ,**__UpperCAmelCase )
requires_backends(self ,'decord' )
self.check_model_type(__UpperCAmelCase )
def snake_case__ ( self ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ) -> int:
A__ = {}
if frame_sampling_rate is not None:
A__ = frame_sampling_rate
if num_frames is not None:
A__ = num_frames
A__ = {}
if top_k is not None:
A__ = top_k
return preprocess_params, {}, postprocess_params
def __call__( self ,__UpperCAmelCase ,**__UpperCAmelCase ) -> Dict:
return super().__call__(__UpperCAmelCase ,**__UpperCAmelCase )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase=None ,__UpperCAmelCase=1 ) -> Union[str, Any]:
if num_frames is None:
A__ = self.model.config.num_frames
if video.startswith('http://' ) or video.startswith('https://' ):
A__ = BytesIO(requests.get(__UpperCAmelCase ).content )
A__ = VideoReader(__UpperCAmelCase )
videoreader.seek(0 )
A__ = 0
A__ = num_frames * frame_sampling_rate - 1
A__ = np.linspace(__UpperCAmelCase ,__UpperCAmelCase ,num=__UpperCAmelCase ,dtype=np.intaa )
A__ = videoreader.get_batch(__UpperCAmelCase ).asnumpy()
A__ = list(__UpperCAmelCase )
A__ = self.image_processor(__UpperCAmelCase ,return_tensors=self.framework )
return model_inputs
def snake_case__ ( self ,__UpperCAmelCase ) -> Dict:
A__ = self.model(**__UpperCAmelCase )
return model_outputs
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase=5 ) -> Union[str, Any]:
if top_k > self.model.config.num_labels:
A__ = self.model.config.num_labels
if self.framework == "pt":
A__ = model_outputs.logits.softmax(-1 )[0]
A__ , A__ = probs.topk(__UpperCAmelCase )
else:
raise ValueError(f'''Unsupported framework: {self.framework}''' )
A__ = scores.tolist()
A__ = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(__UpperCAmelCase ,__UpperCAmelCase )]
| 221 | 1 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_A = logging.get_logger(__name__)
_A = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
_A = {
"""vocab_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"""},
"""merges_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"""},
"""tokenizer_config_file""": {
"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"""
},
}
_A = {"""facebook/blenderbot-3B""": 1_2_8}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def lowercase_ ( ) -> Tuple:
lowerCAmelCase__ : int = (
list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
)
lowerCAmelCase__ : Any = bs[:]
lowerCAmelCase__ : Optional[int] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(__UpperCAmelCase )
cs.append(2**8 + n )
n += 1
lowerCAmelCase__ : Dict = [chr(__UpperCAmelCase ) for n in cs]
return dict(zip(__UpperCAmelCase , __UpperCAmelCase ) )
def lowercase_ ( __UpperCAmelCase ) -> List[Any]:
lowerCAmelCase__ : List[Any] = set()
lowerCAmelCase__ : Union[str, Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowerCAmelCase__ : Optional[Any] = char
return pairs
class _lowerCamelCase ( a_ ):
_lowerCamelCase :Optional[Any] = VOCAB_FILES_NAMES
_lowerCamelCase :List[Any] = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase :Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase :Optional[Any] = ["input_ids", "attention_mask"]
def __init__( self : Any , UpperCamelCase : str , UpperCamelCase : Tuple , UpperCamelCase : Any="replace" , UpperCamelCase : Optional[Any]="<s>" , UpperCamelCase : Union[str, Any]="</s>" , UpperCamelCase : Optional[int]="</s>" , UpperCamelCase : str="<s>" , UpperCamelCase : int="<unk>" , UpperCamelCase : int="<pad>" , UpperCamelCase : Dict="<mask>" , UpperCamelCase : Optional[int]=False , **UpperCamelCase : Optional[Any] , ) -> Any:
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else bos_token
lowerCAmelCase__ : int = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else eos_token
lowerCAmelCase__ : Dict = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else sep_token
lowerCAmelCase__ : Union[str, Any] = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else cls_token
lowerCAmelCase__ : int = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else unk_token
lowerCAmelCase__ : Union[str, Any] = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCAmelCase__ : Union[str, Any] = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else mask_token
super().__init__(
errors=UpperCamelCase , bos_token=UpperCamelCase , eos_token=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , cls_token=UpperCamelCase , pad_token=UpperCamelCase , mask_token=UpperCamelCase , add_prefix_space=UpperCamelCase , **UpperCamelCase , )
with open(UpperCamelCase , encoding="""utf-8""" ) as vocab_handle:
lowerCAmelCase__ : Any = json.load(UpperCamelCase )
lowerCAmelCase__ : Union[str, Any] = {v: k for k, v in self.encoder.items()}
lowerCAmelCase__ : Dict = errors # how to handle errors in decoding
lowerCAmelCase__ : Union[str, Any] = bytes_to_unicode()
lowerCAmelCase__ : List[str] = {v: k for k, v in self.byte_encoder.items()}
with open(UpperCamelCase , encoding="""utf-8""" ) as merges_handle:
lowerCAmelCase__ : Optional[int] = merges_handle.read().split("""\n""" )[1:-1]
lowerCAmelCase__ : Dict = [tuple(merge.split() ) for merge in bpe_merges]
lowerCAmelCase__ : Any = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) )
lowerCAmelCase__ : Union[str, Any] = {}
lowerCAmelCase__ : Dict = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCAmelCase__ : Tuple = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def _lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return len(self.encoder )
def _lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def _lowerCAmelCase ( self : List[str] , UpperCamelCase : str ) -> Union[str, Any]:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
lowerCAmelCase__ : Union[str, Any] = tuple(UpperCamelCase )
lowerCAmelCase__ : List[str] = get_pairs(UpperCamelCase )
if not pairs:
return token
while True:
lowerCAmelCase__ : List[str] = min(UpperCamelCase , key=lambda UpperCamelCase : self.bpe_ranks.get(UpperCamelCase , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
lowerCAmelCase__ , lowerCAmelCase__ : str = bigram
lowerCAmelCase__ : List[str] = []
lowerCAmelCase__ : List[str] = 0
while i < len(UpperCamelCase ):
try:
lowerCAmelCase__ : Optional[Any] = word.index(UpperCamelCase , UpperCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCAmelCase__ : List[str] = j
if word[i] == first and i < len(UpperCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCAmelCase__ : List[Any] = tuple(UpperCamelCase )
lowerCAmelCase__ : Tuple = new_word
if len(UpperCamelCase ) == 1:
break
else:
lowerCAmelCase__ : Any = get_pairs(UpperCamelCase )
lowerCAmelCase__ : Tuple = """ """.join(UpperCamelCase )
lowerCAmelCase__ : Tuple = word
return word
def _lowerCAmelCase ( self : Union[str, Any] , UpperCamelCase : List[str] ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ : Tuple = []
for token in re.findall(self.pat , UpperCamelCase ):
lowerCAmelCase__ : List[Any] = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCamelCase ).split(""" """ ) )
return bpe_tokens
def _lowerCAmelCase ( self : Optional[Any] , UpperCamelCase : Union[str, Any] ) -> Dict:
"""simple docstring"""
return self.encoder.get(UpperCamelCase , self.encoder.get(self.unk_token ) )
def _lowerCAmelCase ( self : Union[str, Any] , UpperCamelCase : Optional[Any] ) -> Tuple:
"""simple docstring"""
return self.decoder.get(UpperCamelCase )
def _lowerCAmelCase ( self : List[Any] , UpperCamelCase : Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ : List[str] = """""".join(UpperCamelCase )
lowerCAmelCase__ : List[str] = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def _lowerCAmelCase ( self : List[str] , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(UpperCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase__ : Union[str, Any] = os.path.join(
UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCAmelCase__ : int = os.path.join(
UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase , ensure_ascii=UpperCamelCase ) + """\n""" )
lowerCAmelCase__ : Optional[Any] = 0
with open(UpperCamelCase , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCamelCase : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
""" Please check that the tokenizer is not corrupted!""" )
lowerCAmelCase__ : Dict = token_index
writer.write(""" """.join(UpperCamelCase ) + """\n""" )
index += 1
return vocab_file, merge_file
def _lowerCAmelCase ( self : Dict , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None , UpperCamelCase : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase , token_ids_a=UpperCamelCase , already_has_special_tokens=UpperCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase )) + [1]
return [1] + ([0] * len(UpperCamelCase )) + [1, 1] + ([0] * len(UpperCamelCase )) + [1]
def _lowerCAmelCase ( self : Optional[int] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = [self.sep_token_id]
lowerCAmelCase__ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowerCAmelCase ( self : Union[str, Any] , UpperCamelCase : Any , UpperCamelCase : Optional[int]=False , **UpperCamelCase : Union[str, Any] ) -> str:
"""simple docstring"""
lowerCAmelCase__ : int = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(UpperCamelCase ) > 0 and not text[0].isspace()):
lowerCAmelCase__ : Tuple = """ """ + text
return (text, kwargs)
def _lowerCAmelCase ( self : Optional[Any] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ) -> Any:
"""simple docstring"""
return token_ids_a + [self.eos_token_id]
def _lowerCAmelCase ( self : str , UpperCamelCase : "Conversation" ) -> List[int]:
"""simple docstring"""
lowerCAmelCase__ : List[str] = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(""" """ + text )
else:
# Generated responses should contain them already.
inputs.append(UpperCamelCase )
lowerCAmelCase__ : Union[str, Any] = """ """.join(UpperCamelCase )
lowerCAmelCase__ : Union[str, Any] = self.encode(UpperCamelCase )
if len(UpperCamelCase ) > self.model_max_length:
lowerCAmelCase__ : List[str] = input_ids[-self.model_max_length :]
logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
return input_ids
| 212 |
"""simple docstring"""
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
_A = logging.getLogger(__name__)
class _lowerCamelCase ( a_ ):
def __init__( self : List[Any] , UpperCamelCase : Dict=-1 ) -> List[Any]:
"""simple docstring"""
# in NER datasets, the last column is usually reserved for NER label
lowerCAmelCase__ : Optional[int] = label_idx
def _lowerCAmelCase ( self : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : Union[Split, str] ) -> List[InputExample]:
"""simple docstring"""
if isinstance(UpperCamelCase , UpperCamelCase ):
lowerCAmelCase__ : int = mode.value
lowerCAmelCase__ : Dict = os.path.join(UpperCamelCase , f"""{mode}.txt""" )
lowerCAmelCase__ : str = 1
lowerCAmelCase__ : List[Any] = []
with open(UpperCamelCase , encoding="""utf-8""" ) as f:
lowerCAmelCase__ : str = []
lowerCAmelCase__ : Any = []
for line in f:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=UpperCamelCase , labels=UpperCamelCase ) )
guid_index += 1
lowerCAmelCase__ : Optional[Any] = []
lowerCAmelCase__ : Tuple = []
else:
lowerCAmelCase__ : Optional[int] = line.split(""" """ )
words.append(splits[0] )
if len(UpperCamelCase ) > 1:
labels.append(splits[self.label_idx].replace("""\n""" , """""" ) )
else:
# Examples could have no label for mode = "test"
labels.append("""O""" )
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=UpperCamelCase , labels=UpperCamelCase ) )
return examples
def _lowerCAmelCase ( self : Any , UpperCamelCase : TextIO , UpperCamelCase : TextIO , UpperCamelCase : List ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ : Dict = 0
for line in test_input_reader:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
writer.write(UpperCamelCase )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
lowerCAmelCase__ : Union[str, Any] = line.split()[0] + """ """ + preds_list[example_id].pop(0 ) + """\n"""
writer.write(UpperCamelCase )
else:
logger.warning("""Maximum sequence length exceeded: No prediction for '%s'.""" , line.split()[0] )
def _lowerCAmelCase ( self : str , UpperCamelCase : str ) -> List[str]:
"""simple docstring"""
if path:
with open(UpperCamelCase , """r""" ) as f:
lowerCAmelCase__ : Any = f.read().splitlines()
if "O" not in labels:
lowerCAmelCase__ : List[str] = ["""O"""] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class _lowerCamelCase ( a_ ):
def __init__( self : Union[str, Any] ) -> Any:
"""simple docstring"""
# in CONLL2003 dataset chunk column is second-to-last
super().__init__(label_idx=-2 )
def _lowerCAmelCase ( self : Optional[Any] , UpperCamelCase : str ) -> List[str]:
"""simple docstring"""
if path:
with open(UpperCamelCase , """r""" ) as f:
lowerCAmelCase__ : Any = f.read().splitlines()
if "O" not in labels:
lowerCAmelCase__ : str = ["""O"""] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class _lowerCamelCase ( a_ ):
def _lowerCAmelCase ( self : Union[str, Any] , UpperCamelCase : Any , UpperCamelCase : Union[Split, str] ) -> List[InputExample]:
"""simple docstring"""
if isinstance(UpperCamelCase , UpperCamelCase ):
lowerCAmelCase__ : Optional[Any] = mode.value
lowerCAmelCase__ : int = os.path.join(UpperCamelCase , f"""{mode}.txt""" )
lowerCAmelCase__ : Optional[int] = 1
lowerCAmelCase__ : List[Any] = []
with open(UpperCamelCase , encoding="""utf-8""" ) as f:
for sentence in parse_incr(UpperCamelCase ):
lowerCAmelCase__ : Union[str, Any] = []
lowerCAmelCase__ : List[Any] = []
for token in sentence:
words.append(token["""form"""] )
labels.append(token["""upos"""] )
assert len(UpperCamelCase ) == len(UpperCamelCase )
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=UpperCamelCase , labels=UpperCamelCase ) )
guid_index += 1
return examples
def _lowerCAmelCase ( self : List[str] , UpperCamelCase : TextIO , UpperCamelCase : TextIO , UpperCamelCase : List ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ : Any = 0
for sentence in parse_incr(UpperCamelCase ):
lowerCAmelCase__ : Union[str, Any] = preds_list[example_id]
lowerCAmelCase__ : List[Any] = """"""
for token in sentence:
out += f"""{token['form']} ({token['upos']}|{s_p.pop(0 )}) """
out += "\n"
writer.write(UpperCamelCase )
example_id += 1
def _lowerCAmelCase ( self : Dict , UpperCamelCase : str ) -> List[str]:
"""simple docstring"""
if path:
with open(UpperCamelCase , """r""" ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 212 | 1 |
"""simple docstring"""
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
SCREAMING_SNAKE_CASE_ = '''src/transformers'''
SCREAMING_SNAKE_CASE_ = '''docs/source/en/tasks'''
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
with open(_lowerCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
__lowerCAmelCase = f.readlines()
# Find the start prompt.
__lowerCAmelCase = 0
while not lines[start_index].startswith(_lowerCAmelCase ):
start_index += 1
start_index += 1
__lowerCAmelCase = start_index
while not lines[end_index].startswith(_lowerCAmelCase ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
SCREAMING_SNAKE_CASE_ = direct_transformers_import(TRANSFORMERS_PATH)
SCREAMING_SNAKE_CASE_ = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SCREAMING_SNAKE_CASE_ = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def lowercase (_lowerCAmelCase ):
__lowerCAmelCase = TASK_GUIDE_TO_MODELS[task_guide]
__lowerCAmelCase = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(_lowerCAmelCase , set() )
__lowerCAmelCase = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([f"""[{name}](../model_doc/{code})""" for code, name in model_names.items()] ) + "\n"
def lowercase (_lowerCAmelCase , _lowerCAmelCase=False ):
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = _find_text_in_file(
filename=os.path.join(_lowerCAmelCase , _lowerCAmelCase ) , start_prompt="""<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->""" , end_prompt="""<!--End of the generated tip-->""" , )
__lowerCAmelCase = get_model_list_for_task(_lowerCAmelCase )
if current_list != new_list:
if overwrite:
with open(os.path.join(_lowerCAmelCase , _lowerCAmelCase ) , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
f"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"""
""" to fix this.""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
SCREAMING_SNAKE_CASE_ = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 301 |
"""simple docstring"""
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Base model mapping
('''albert''', '''FlaxAlbertModel'''),
('''bart''', '''FlaxBartModel'''),
('''beit''', '''FlaxBeitModel'''),
('''bert''', '''FlaxBertModel'''),
('''big_bird''', '''FlaxBigBirdModel'''),
('''blenderbot''', '''FlaxBlenderbotModel'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
('''clip''', '''FlaxCLIPModel'''),
('''distilbert''', '''FlaxDistilBertModel'''),
('''electra''', '''FlaxElectraModel'''),
('''gpt-sw3''', '''FlaxGPT2Model'''),
('''gpt2''', '''FlaxGPT2Model'''),
('''gpt_neo''', '''FlaxGPTNeoModel'''),
('''gptj''', '''FlaxGPTJModel'''),
('''longt5''', '''FlaxLongT5Model'''),
('''marian''', '''FlaxMarianModel'''),
('''mbart''', '''FlaxMBartModel'''),
('''mt5''', '''FlaxMT5Model'''),
('''opt''', '''FlaxOPTModel'''),
('''pegasus''', '''FlaxPegasusModel'''),
('''regnet''', '''FlaxRegNetModel'''),
('''resnet''', '''FlaxResNetModel'''),
('''roberta''', '''FlaxRobertaModel'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
('''roformer''', '''FlaxRoFormerModel'''),
('''t5''', '''FlaxT5Model'''),
('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
('''vit''', '''FlaxViTModel'''),
('''wav2vec2''', '''FlaxWav2Vec2Model'''),
('''whisper''', '''FlaxWhisperModel'''),
('''xglm''', '''FlaxXGLMModel'''),
('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for pre-training mapping
('''albert''', '''FlaxAlbertForPreTraining'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForPreTraining'''),
('''big_bird''', '''FlaxBigBirdForPreTraining'''),
('''electra''', '''FlaxElectraForPreTraining'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Masked LM mapping
('''albert''', '''FlaxAlbertForMaskedLM'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForMaskedLM'''),
('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
('''electra''', '''FlaxElectraForMaskedLM'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''marian''', '''FlaxMarianMTModel'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Image-classsification
('''beit''', '''FlaxBeitForImageClassification'''),
('''regnet''', '''FlaxRegNetForImageClassification'''),
('''resnet''', '''FlaxResNetForImageClassification'''),
('''vit''', '''FlaxViTForImageClassification'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Causal LM mapping
('''bart''', '''FlaxBartForCausalLM'''),
('''bert''', '''FlaxBertForCausalLM'''),
('''big_bird''', '''FlaxBigBirdForCausalLM'''),
('''electra''', '''FlaxElectraForCausalLM'''),
('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
('''gpt2''', '''FlaxGPT2LMHeadModel'''),
('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
('''gptj''', '''FlaxGPTJForCausalLM'''),
('''opt''', '''FlaxOPTForCausalLM'''),
('''roberta''', '''FlaxRobertaForCausalLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
('''xglm''', '''FlaxXGLMForCausalLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Sequence Classification mapping
('''albert''', '''FlaxAlbertForSequenceClassification'''),
('''bart''', '''FlaxBartForSequenceClassification'''),
('''bert''', '''FlaxBertForSequenceClassification'''),
('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
('''electra''', '''FlaxElectraForSequenceClassification'''),
('''mbart''', '''FlaxMBartForSequenceClassification'''),
('''roberta''', '''FlaxRobertaForSequenceClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Question Answering mapping
('''albert''', '''FlaxAlbertForQuestionAnswering'''),
('''bart''', '''FlaxBartForQuestionAnswering'''),
('''bert''', '''FlaxBertForQuestionAnswering'''),
('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
('''electra''', '''FlaxElectraForQuestionAnswering'''),
('''mbart''', '''FlaxMBartForQuestionAnswering'''),
('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Token Classification mapping
('''albert''', '''FlaxAlbertForTokenClassification'''),
('''bert''', '''FlaxBertForTokenClassification'''),
('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
('''electra''', '''FlaxElectraForTokenClassification'''),
('''roberta''', '''FlaxRobertaForTokenClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
('''roformer''', '''FlaxRoFormerForTokenClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Multiple Choice mapping
('''albert''', '''FlaxAlbertForMultipleChoice'''),
('''bert''', '''FlaxBertForMultipleChoice'''),
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
('''electra''', '''FlaxElectraForMultipleChoice'''),
('''roberta''', '''FlaxRobertaForMultipleChoice'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
('''bert''', '''FlaxBertForNextSentencePrediction'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
('''whisper''', '''FlaxWhisperForAudioClassification'''),
]
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModel)
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_PRETRAINING_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForPreTraining, head_doc='''pretraining''')
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForCausalLM, head_doc='''causal language modeling''')
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_MASKED_LM_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='''masked language modeling''')
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc='''sequence-to-sequence language modeling''', checkpoint_for_example='''t5-base'''
)
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc='''sequence classification'''
)
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='''question answering''')
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc='''token classification'''
)
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='''multiple choice''')
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc='''next sentence prediction'''
)
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForImageClassification, head_doc='''image classification'''
)
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='''vision-to-text modeling''')
class lowerCAmelCase_ ( _BaseAutoModelClass ):
'''simple docstring'''
_snake_case = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc='''sequence-to-sequence speech-to-text modeling'''
)
| 301 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : UNetaDModel
__SCREAMING_SNAKE_CASE : ScoreSdeVeScheduler
def __init__( self , _lowerCamelCase , _lowerCamelCase ) ->Tuple:
super().__init__()
self.register_modules(unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ )
@torch.no_grad()
def __call__( self , _lowerCamelCase = 1 , _lowerCamelCase = 2000 , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , **_lowerCamelCase , ) ->Union[ImagePipelineOutput, Tuple]:
SCREAMING_SNAKE_CASE : List[Any] = self.unet.config.sample_size
SCREAMING_SNAKE_CASE : str = (batch_size, 3, img_size, img_size)
SCREAMING_SNAKE_CASE : Optional[int] = self.unet
SCREAMING_SNAKE_CASE : Optional[Any] = randn_tensor(lowerCAmelCase__ , generator=lowerCAmelCase__ ) * self.scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE : Dict = sample.to(self.device )
self.scheduler.set_timesteps(lowerCAmelCase__ )
self.scheduler.set_sigmas(lowerCAmelCase__ )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
SCREAMING_SNAKE_CASE : str = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
SCREAMING_SNAKE_CASE : List[str] = self.unet(lowerCAmelCase__ , lowerCAmelCase__ ).sample
SCREAMING_SNAKE_CASE : List[str] = self.scheduler.step_correct(lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ ).prev_sample
# prediction step
SCREAMING_SNAKE_CASE : Any = model(lowerCAmelCase__ , lowerCAmelCase__ ).sample
SCREAMING_SNAKE_CASE : List[Any] = self.scheduler.step_pred(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : int = output.prev_sample, output.prev_sample_mean
SCREAMING_SNAKE_CASE : List[str] = sample_mean.clamp(0 , 1 )
SCREAMING_SNAKE_CASE : List[str] = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE : Optional[Any] = self.numpy_to_pil(lowerCAmelCase__ )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=lowerCAmelCase__ )
| 362 |
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a__ : Optional[Any] = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class a_ ( a__ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = XLMProphetNetTokenizer
__SCREAMING_SNAKE_CASE : List[str] = False
__SCREAMING_SNAKE_CASE : Dict = True
def __lowerCAmelCase ( self ) ->Dict:
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE : Optional[Any] = XLMProphetNetTokenizer(_lowerCamelCase , keep_accents=_lowerCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE : List[str] = '''[PAD]'''
SCREAMING_SNAKE_CASE : Tuple = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCamelCase ) , _lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCamelCase ) , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->List[Any]:
SCREAMING_SNAKE_CASE : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''[PAD]''' )
self.assertEqual(vocab_keys[1] , '''[CLS]''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(_lowerCamelCase ) , 1012 )
def __lowerCAmelCase ( self ) ->List[str]:
self.assertEqual(self.get_tokenizer().vocab_size , 1012 )
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : Union[str, Any] = XLMProphetNetTokenizer(_lowerCamelCase , keep_accents=_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_lowerCamelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_lowerCamelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_tokens_to_ids(_lowerCamelCase )
self.assertListEqual(
_lowerCamelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
SCREAMING_SNAKE_CASE : str = tokenizer.convert_ids_to_tokens(_lowerCamelCase )
self.assertListEqual(
_lowerCamelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''[UNK]''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''[UNK]''',
'''.''',
] , )
@cached_property
def __lowerCAmelCase ( self ) ->List[str]:
return XLMProphetNetTokenizer.from_pretrained('''microsoft/xprophetnet-large-wiki100-cased''' )
@slow
def __lowerCAmelCase ( self ) ->Any:
SCREAMING_SNAKE_CASE : Union[str, Any] = '''Hello World!'''
SCREAMING_SNAKE_CASE : int = [3_5389, 6672, 49, 2]
self.assertListEqual(_lowerCamelCase , self.big_tokenizer.encode(_lowerCamelCase ) )
@slow
def __lowerCAmelCase ( self ) ->int:
# fmt: off
SCREAMING_SNAKE_CASE : str = {'''input_ids''': [[1_1073, 8_2783, 18, 26, 8_2783, 549, 5_1540, 248, 1_7209, 1301, 217, 20, 21_5186, 1325, 147, 1_7209, 1301, 217, 20, 5_6370, 53, 12_2020, 20, 1_6477, 27, 8_7355, 4548, 20, 4728, 7_8392, 17, 15_9969, 18, 26, 2_4491, 629, 15, 538, 2_2704, 5439, 15, 2788, 2_4491, 9885, 15, 4_3534, 605, 15, 814, 1_8403, 3_3200, 29, 15, 4_3534, 2_4458, 1_2410, 111, 2_4966, 8_3669, 9637, 14_4068, 26, 850, 2_2346, 27, 147, 2_4966, 8_3669, 8_3490, 26, 3_9113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 12_2020, 11_5785, 34, 816, 1339, 4_6887, 18, 147, 5_3905, 1951, 4_2238, 4_1170, 1_7732, 834, 436, 15, 2_7523, 9_8733, 217, 147, 5542, 4981, 930, 1_7347, 16, 2], [2_0091, 629, 94, 8_2786, 58, 490, 20, 1528, 84, 5_3905, 344, 8_0592, 11_0128, 1_8822, 5267, 1306, 62, 15_2537, 308, 7997, 401, 12_4427, 549, 3_5442, 225, 109, 1_5055, 2_5748, 147, 7119, 4_3712, 34, 767, 13_5366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 6_3784, 11_9466, 17, 14_7808, 8_8214, 18, 656, 81, 32, 3296, 1_0280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowerCamelCase , model_name='''microsoft/xprophetnet-large-wiki100-cased''' , revision='''1acad1643ddd54a44df6a1b797ada8373685d90e''' , )
| 19 | 0 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
__lowerCAmelCase : List[Any] = '0.12' # assumed parallelism: 8
if is_torch_available():
import torch
def __magic_name__ ( A : Dict, A : Union[str, Any], A : Optional[int]=None ):
'''simple docstring'''
if rng is None:
a = random.Random()
a = 1
for dim in shape:
total_dims *= dim
a = []
for _ in range(A ):
values.append(rng.randint(0, vocab_size - 1 ) )
a = np.array(A, dtype=jnp.intaa ).reshape(A )
return output
def __magic_name__ ( A : Dict, A : Union[str, Any]=None ):
'''simple docstring'''
a = ids_tensor(A, vocab_size=2, rng=A )
# make sure that at least one token is attended to for each batch
a = 1
return attn_mask
@require_flax
class snake_case__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = None
SCREAMING_SNAKE_CASE_ : Any = ()
def __UpperCAmelCase ( self : int ) -> List[str]:
a , a = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
a = 2
a = inputs["input_ids"].shape[-1] // 2
a = inputs["input_ids"][:max_batch_size, :sequence_length]
a = jnp.ones_like(__lowerCamelCase )
a = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
a = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
a = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def __UpperCAmelCase ( self : Optional[Any] ) -> int:
a , a , a , a = self._get_input_ids_and_config()
a = False
a = max_length
a = 0
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model_class.__name__[4:] # Skip the "Flax" at the beginning
a = getattr(__lowerCamelCase , __lowerCamelCase )
a = pt_model_class(__lowerCamelCase ).eval()
a = load_flax_weights_in_pytorch_model(__lowerCamelCase , flax_model.params )
a = flax_model.generate(__lowerCamelCase ).sequences
a = pt_model.generate(torch.tensor(__lowerCamelCase , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
a = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
a , a , a , a = self._get_input_ids_and_config()
a = False
a = max_length
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
a = jit(model.generate )
a = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCAmelCase ( self : Optional[int] ) -> Any:
a , a , a , a = self._get_input_ids_and_config()
a = True
a = max_length
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
a = jit(model.generate )
a = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCAmelCase ( self : int ) -> Dict:
a , a , a , a = self._get_input_ids_and_config()
a = False
a = max_length
a = 2
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
a = jit(model.generate )
a = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCAmelCase ( self : Any ) -> Union[str, Any]:
a , a , a , a = self._get_input_ids_and_config()
a = False
a = max_length
a = 2
a = 2
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def __UpperCAmelCase ( self : Optional[Any] ) -> Dict:
a , a , a , a = self._get_input_ids_and_config()
a = True
a = max_length
a = 0.8
a = 10
a = 0.3
a = 1
a = 8
a = 9
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
a = jit(model.generate )
a = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
a , a , a , a = self._get_input_ids_and_config()
a = max_length
a = 1
a = 8
a = 9
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
a = jit(model.generate )
a = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
a , a , a , a = self._get_input_ids_and_config()
a = max_length
a = 2
a = 1
a = 8
a = 9
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
a = jit(model.generate )
a = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
a , a , a , a = self._get_input_ids_and_config()
# pad attention mask on the left
a = attention_mask.at[(0, 0)].set(0 )
a = False
a = max_length
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
a = jit(model.generate )
a = jit_generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCAmelCase ( self : Tuple ) -> Tuple:
a , a , a , a = self._get_input_ids_and_config()
# pad attention mask on the left
a = attention_mask.at[(0, 0)].set(0 )
a = True
a = max_length
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
a = jit(model.generate )
a = jit_generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
a , a , a , a = self._get_input_ids_and_config()
# pad attention mask on the left
a = attention_mask.at[(0, 0)].set(0 )
a = 2
a = max_length
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
a = jit(model.generate )
a = jit_generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
a = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert" )
a = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
a = "Hello world"
a = tokenizer(__lowerCamelCase , return_tensors="np" ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(__lowerCamelCase , "do_samples" ):
model.generate(__lowerCamelCase , do_samples=__lowerCamelCase )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(__lowerCamelCase , "foo" ):
a = {"foo": "bar"}
model.generate(__lowerCamelCase , **__lowerCamelCase )
| 107 |
import sys
from collections import defaultdict
class __lowerCAmelCase :
def __init__( self : int) -> str:
"""simple docstring"""
_UpperCAmelCase = []
def _lowerCamelCase ( self : Any , A : List[str]) -> int:
"""simple docstring"""
return self.node_position[vertex]
def _lowerCamelCase ( self : Optional[Any] , A : Optional[int] , A : str) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = pos
def _lowerCamelCase ( self : Tuple , A : Tuple , A : Dict , A : List[str] , A : Optional[Any]) -> Dict:
"""simple docstring"""
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
_UpperCAmelCase = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
_UpperCAmelCase = 2 * start + 1
else:
_UpperCAmelCase = 2 * start + 2
if heap[smallest_child] < heap[start]:
_UpperCAmelCase , _UpperCAmelCase = heap[smallest_child], positions[smallest_child]
_UpperCAmelCase , _UpperCAmelCase = (
heap[start],
positions[start],
)
_UpperCAmelCase , _UpperCAmelCase = temp, tempa
_UpperCAmelCase = self.get_position(positions[smallest_child])
self.set_position(
positions[smallest_child] , self.get_position(positions[start]))
self.set_position(positions[start] , A)
self.top_to_bottom(A , A , A , A)
def _lowerCamelCase ( self : Optional[int] , A : str , A : Optional[Any] , A : Optional[int] , A : str) -> Any:
"""simple docstring"""
_UpperCAmelCase = position[index]
while index != 0:
_UpperCAmelCase = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
if val < heap[parent]:
_UpperCAmelCase = heap[parent]
_UpperCAmelCase = position[parent]
self.set_position(position[parent] , A)
else:
_UpperCAmelCase = val
_UpperCAmelCase = temp
self.set_position(A , A)
break
_UpperCAmelCase = parent
else:
_UpperCAmelCase = val
_UpperCAmelCase = temp
self.set_position(A , 0)
def _lowerCamelCase ( self : Union[str, Any] , A : Optional[int] , A : Tuple) -> str:
"""simple docstring"""
_UpperCAmelCase = len(A) // 2 - 1
for i in range(A , -1 , -1):
self.top_to_bottom(A , A , len(A) , A)
def _lowerCamelCase ( self : Optional[int] , A : int , A : str) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = positions[0]
_UpperCAmelCase = sys.maxsize
self.top_to_bottom(A , 0 , len(A) , A)
return temp
def A ( _UpperCAmelCase : int ) -> Any:
'''simple docstring'''
_UpperCAmelCase = Heap()
_UpperCAmelCase = [0] * len(_UpperCAmelCase )
_UpperCAmelCase = [-1] * len(_UpperCAmelCase ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
_UpperCAmelCase = [] # Heap of Distance of vertices from their neighboring vertex
_UpperCAmelCase = []
for vertex in range(len(_UpperCAmelCase ) ):
distance_tv.append(sys.maxsize )
positions.append(_UpperCAmelCase )
heap.node_position.append(_UpperCAmelCase )
_UpperCAmelCase = []
_UpperCAmelCase = 1
_UpperCAmelCase = sys.maxsize
for neighbor, distance in adjacency_list[0]:
_UpperCAmelCase = 0
_UpperCAmelCase = distance
heap.heapify(_UpperCAmelCase , _UpperCAmelCase )
for _ in range(1 , len(_UpperCAmelCase ) ):
_UpperCAmelCase = heap.delete_minimum(_UpperCAmelCase , _UpperCAmelCase )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
_UpperCAmelCase = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(_UpperCAmelCase )]
):
_UpperCAmelCase = distance
heap.bottom_to_top(
_UpperCAmelCase , heap.get_position(_UpperCAmelCase ) , _UpperCAmelCase , _UpperCAmelCase )
_UpperCAmelCase = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
UpperCAmelCase__ = int(input("Enter number of edges: ").strip())
UpperCAmelCase__ = defaultdict(list)
for _ in range(edges_number):
UpperCAmelCase__ = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 339 | 0 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase : List[Any] = logging.get_logger(__name__)
lowercase : List[Any] = {
'''microsoft/conditional-detr-resnet-50''': (
'''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'''
),
}
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A : Tuple = 'conditional_detr'
A : Optional[int] = ['past_key_values']
A : List[Any] = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=300 , _SCREAMING_SNAKE_CASE=6 , _SCREAMING_SNAKE_CASE=2048 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=6 , _SCREAMING_SNAKE_CASE=2048 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE="relu" , _SCREAMING_SNAKE_CASE=256 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1.0 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE="sine" , _SCREAMING_SNAKE_CASE="resnet50" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.25 , **_SCREAMING_SNAKE_CASE , ) -> str:
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
snake_case_ : List[Any] = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
snake_case_ : Optional[int] = backbone_config.get("model_type" )
snake_case_ : str = CONFIG_MAPPING[backbone_model_type]
snake_case_ : Tuple = config_class.from_dict(_SCREAMING_SNAKE_CASE )
snake_case_ : Optional[Any] = use_timm_backbone
snake_case_ : Optional[Any] = backbone_config
snake_case_ : str = num_channels
snake_case_ : Optional[Any] = num_queries
snake_case_ : Optional[Any] = d_model
snake_case_ : Optional[Any] = encoder_ffn_dim
snake_case_ : str = encoder_layers
snake_case_ : int = encoder_attention_heads
snake_case_ : int = decoder_ffn_dim
snake_case_ : Optional[Any] = decoder_layers
snake_case_ : List[str] = decoder_attention_heads
snake_case_ : List[str] = dropout
snake_case_ : Optional[int] = attention_dropout
snake_case_ : Tuple = activation_dropout
snake_case_ : List[Any] = activation_function
snake_case_ : Dict = init_std
snake_case_ : str = init_xavier_std
snake_case_ : Tuple = encoder_layerdrop
snake_case_ : int = decoder_layerdrop
snake_case_ : List[Any] = encoder_layers
snake_case_ : int = auxiliary_loss
snake_case_ : int = position_embedding_type
snake_case_ : List[str] = backbone
snake_case_ : Union[str, Any] = use_pretrained_backbone
snake_case_ : Optional[Any] = dilation
# Hungarian matcher
snake_case_ : Tuple = class_cost
snake_case_ : Tuple = bbox_cost
snake_case_ : str = giou_cost
# Loss coefficients
snake_case_ : Union[str, Any] = mask_loss_coefficient
snake_case_ : Tuple = dice_loss_coefficient
snake_case_ : List[str] = cls_loss_coefficient
snake_case_ : List[str] = bbox_loss_coefficient
snake_case_ : List[str] = giou_loss_coefficient
snake_case_ : Any = focal_alpha
super().__init__(is_encoder_decoder=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@property
def _lowerCAmelCase ( self ) -> int:
return self.encoder_attention_heads
@property
def _lowerCAmelCase ( self ) -> int:
return self.d_model
def _lowerCAmelCase ( self ) -> Optional[Any]:
snake_case_ : List[Any] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
snake_case_ : Optional[int] = self.backbone_config.to_dict()
snake_case_ : Optional[int] = self.__class__.model_type
return output
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A : Union[str, Any] = version.parse('1.11' )
@property
def _lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def _lowerCAmelCase ( self ) -> float:
return 1e-5
@property
def _lowerCAmelCase ( self ) -> int:
return 12
| 356 |
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__( self ) -> Dict:
snake_case_ : int = []
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Any:
self.events.append("on_init_end" )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Dict:
self.events.append("on_train_begin" )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Tuple:
self.events.append("on_train_end" )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> List[Any]:
self.events.append("on_epoch_begin" )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
self.events.append("on_epoch_end" )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> List[str]:
self.events.append("on_step_begin" )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Tuple:
self.events.append("on_step_end" )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> List[str]:
self.events.append("on_evaluate" )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Tuple:
self.events.append("on_predict" )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
self.events.append("on_save" )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> List[str]:
self.events.append("on_log" )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[int]:
self.events.append("on_prediction_step" )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> Any:
snake_case_ : Optional[int] = tempfile.mkdtemp()
def _lowerCAmelCase ( self ) -> Optional[Any]:
shutil.rmtree(self.output_dir )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=64 , _SCREAMING_SNAKE_CASE=64 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=False , **_SCREAMING_SNAKE_CASE ) -> Dict:
# disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
# its set to False since the tests later on depend on its value.
snake_case_ : Any = RegressionDataset(length=_SCREAMING_SNAKE_CASE )
snake_case_ : Dict = RegressionDataset(length=_SCREAMING_SNAKE_CASE )
snake_case_ : List[Any] = RegressionModelConfig(a=_SCREAMING_SNAKE_CASE , b=_SCREAMING_SNAKE_CASE )
snake_case_ : List[str] = RegressionPreTrainedModel(_SCREAMING_SNAKE_CASE )
snake_case_ : Tuple = TrainingArguments(self.output_dir , disable_tqdm=_SCREAMING_SNAKE_CASE , report_to=[] , **_SCREAMING_SNAKE_CASE )
return Trainer(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , train_dataset=_SCREAMING_SNAKE_CASE , eval_dataset=_SCREAMING_SNAKE_CASE , callbacks=_SCREAMING_SNAKE_CASE , )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) )
# Order doesn't matter
snake_case_ : List[str] = sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : cb.__name__ if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else cb.__class__.__name__ )
snake_case_ : List[str] = sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : cb.__name__ if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else cb.__class__.__name__ )
for cba, cba in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assertEqual(_SCREAMING_SNAKE_CASE , cba.__class__ )
elif not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assertEqual(cba.__class__ , _SCREAMING_SNAKE_CASE )
else:
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> int:
snake_case_ : int = ["on_init_end", "on_train_begin"]
snake_case_ : Any = 0
snake_case_ : Dict = len(trainer.get_eval_dataloader() )
snake_case_ : Tuple = ["on_prediction_step"] * len(trainer.get_eval_dataloader() ) + ["on_log", "on_evaluate"]
for _ in range(trainer.state.num_train_epochs ):
expected_events.append("on_epoch_begin" )
for _ in range(_SCREAMING_SNAKE_CASE ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append("on_log" )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append("on_save" )
expected_events.append("on_epoch_end" )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def _lowerCAmelCase ( self ) -> int:
snake_case_ : Dict = self.get_trainer()
snake_case_ : Any = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , _SCREAMING_SNAKE_CASE )
# Callbacks passed at init are added to the default callbacks
snake_case_ : int = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(_SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _SCREAMING_SNAKE_CASE )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
snake_case_ : Optional[int] = self.get_trainer(disable_tqdm=_SCREAMING_SNAKE_CASE )
snake_case_ : Dict = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , _SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self ) -> Optional[int]:
snake_case_ : Tuple = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
snake_case_ : List[str] = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(_SCREAMING_SNAKE_CASE )
expected_callbacks.remove(_SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _SCREAMING_SNAKE_CASE )
snake_case_ : Union[str, Any] = self.get_trainer()
snake_case_ : List[Any] = trainer.pop_callback(_SCREAMING_SNAKE_CASE )
self.assertEqual(cb.__class__ , _SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _SCREAMING_SNAKE_CASE )
trainer.add_callback(_SCREAMING_SNAKE_CASE )
expected_callbacks.insert(0 , _SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _SCREAMING_SNAKE_CASE )
# We can also add, pop, or remove by instance
snake_case_ : str = self.get_trainer()
snake_case_ : Tuple = trainer.callback_handler.callbacks[0]
trainer.remove_callback(_SCREAMING_SNAKE_CASE )
expected_callbacks.remove(_SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _SCREAMING_SNAKE_CASE )
snake_case_ : str = self.get_trainer()
snake_case_ : List[Any] = trainer.callback_handler.callbacks[0]
snake_case_ : List[str] = trainer.pop_callback(_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _SCREAMING_SNAKE_CASE )
trainer.add_callback(_SCREAMING_SNAKE_CASE )
expected_callbacks.insert(0 , _SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self ) -> Union[str, Any]:
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action="ignore" , category=_SCREAMING_SNAKE_CASE )
snake_case_ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
snake_case_ : Optional[Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(_SCREAMING_SNAKE_CASE , self.get_expected_events(_SCREAMING_SNAKE_CASE ) )
# Independent log/save/eval
snake_case_ : int = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
snake_case_ : Tuple = trainer.callback_handler.callbacks[-2].events
self.assertEqual(_SCREAMING_SNAKE_CASE , self.get_expected_events(_SCREAMING_SNAKE_CASE ) )
snake_case_ : Any = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
snake_case_ : Tuple = trainer.callback_handler.callbacks[-2].events
self.assertEqual(_SCREAMING_SNAKE_CASE , self.get_expected_events(_SCREAMING_SNAKE_CASE ) )
snake_case_ : Dict = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="steps" )
trainer.train()
snake_case_ : List[Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(_SCREAMING_SNAKE_CASE , self.get_expected_events(_SCREAMING_SNAKE_CASE ) )
snake_case_ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="epoch" )
trainer.train()
snake_case_ : Union[str, Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(_SCREAMING_SNAKE_CASE , self.get_expected_events(_SCREAMING_SNAKE_CASE ) )
# A bit of everything
snake_case_ : Any = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy="steps" , )
trainer.train()
snake_case_ : List[str] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(_SCREAMING_SNAKE_CASE , self.get_expected_events(_SCREAMING_SNAKE_CASE ) )
# warning should be emitted for duplicated callbacks
with patch("transformers.trainer_callback.logger.warning" ) as warn_mock:
snake_case_ : int = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(_SCREAMING_SNAKE_CASE ) in warn_mock.call_args[0][0]
| 36 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase__ = {
"configuration_xlm_roberta": [
"XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XLMRobertaConfig",
"XLMRobertaOnnxConfig",
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["XLMRobertaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["XLMRobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMRobertaForCausalLM",
"XLMRobertaForMaskedLM",
"XLMRobertaForMultipleChoice",
"XLMRobertaForQuestionAnswering",
"XLMRobertaForSequenceClassification",
"XLMRobertaForTokenClassification",
"XLMRobertaModel",
"XLMRobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMRobertaForCausalLM",
"TFXLMRobertaForMaskedLM",
"TFXLMRobertaForMultipleChoice",
"TFXLMRobertaForQuestionAnswering",
"TFXLMRobertaForSequenceClassification",
"TFXLMRobertaForTokenClassification",
"TFXLMRobertaModel",
"TFXLMRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxXLMRobertaForMaskedLM",
"FlaxXLMRobertaForCausalLM",
"FlaxXLMRobertaForMultipleChoice",
"FlaxXLMRobertaForQuestionAnswering",
"FlaxXLMRobertaForSequenceClassification",
"FlaxXLMRobertaForTokenClassification",
"FlaxXLMRobertaModel",
"FlaxXLMRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase : Dict = {
"""configuration_jukebox""": [
"""JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""JukeboxConfig""",
"""JukeboxPriorConfig""",
"""JukeboxVQVAEConfig""",
],
"""tokenization_jukebox""": ["""JukeboxTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Tuple = [
"""JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""JukeboxModel""",
"""JukeboxPreTrainedModel""",
"""JukeboxVQVAE""",
"""JukeboxPrior""",
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
__UpperCamelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 307 | 0 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 364 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class A_ ( _snake_case ):
'''simple docstring'''
def __init__( self : List[Any] , lowercase_ : VQModel , lowercase_ : UNetaDModel , lowercase_ : DDIMScheduler ) -> int:
super().__init__()
self.register_modules(vqvae=lowercase_ , unet=lowercase_ , scheduler=lowercase_ )
@torch.no_grad()
def __call__( self : str , lowercase_ : int = 1 , lowercase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowercase_ : float = 0.0 , lowercase_ : int = 50 , lowercase_ : Optional[str] = "pil" , lowercase_ : bool = True , **lowercase_ : Optional[Any] , ) -> Union[Tuple, ImagePipelineOutput]:
UpperCAmelCase : str = randn_tensor(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=lowercase_ , )
UpperCAmelCase : Any = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCAmelCase : Optional[Any] = latents * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(lowercase_ )
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
UpperCAmelCase : Optional[int] = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCAmelCase : Tuple = {}
if accepts_eta:
UpperCAmelCase : List[str] = eta
for t in self.progress_bar(self.scheduler.timesteps ):
UpperCAmelCase : Dict = self.scheduler.scale_model_input(lowercase_ , lowercase_ )
# predict the noise residual
UpperCAmelCase : Dict = self.unet(lowercase_ , lowercase_ ).sample
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase : Dict = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
# decode the image latents with the VAE
UpperCAmelCase : Any = self.vqvae.decode(lowercase_ ).sample
UpperCAmelCase : Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 )
UpperCAmelCase : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCAmelCase : Tuple = self.numpy_to_pil(lowercase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase_ )
| 280 | 0 |
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
UpperCAmelCase__ = Lock()
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) -> str:
"""simple docstring"""
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(__snake_case )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
_lowercase =rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
_lowercase =min(__snake_case , __snake_case )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(__snake_case )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
_lowercase =lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
_lowercase =max(__snake_case , __snake_case )
# after all swaps are performed, send the values back to main
result_pipe[1].send(__snake_case )
def UpperCAmelCase_ ( __snake_case ) -> int:
"""simple docstring"""
_lowercase =[]
_lowercase =[]
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
_lowercase =Pipe()
_lowercase =Pipe()
process_array_.append(
Process(
target=__snake_case , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
_lowercase =temp_rs
_lowercase =temp_rr
for i in range(1 , len(__snake_case ) - 1 ):
_lowercase =Pipe()
_lowercase =Pipe()
process_array_.append(
Process(
target=__snake_case , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
_lowercase =temp_rs
_lowercase =temp_rr
process_array_.append(
Process(
target=__snake_case , args=(
len(__snake_case ) - 1,
arr[len(__snake_case ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(__snake_case ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(__snake_case ) ):
_lowercase =result_pipe[p][0].recv()
process_array_[p].join()
return arr
def UpperCAmelCase_ ( ) -> str:
"""simple docstring"""
_lowercase =list(range(10 , 0 , -1 ) )
print('''Initial List''' )
print(*__snake_case )
_lowercase =odd_even_transposition(__snake_case )
print('''Sorted List\n''' )
print(*__snake_case )
if __name__ == "__main__":
main()
| 5 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase__ = {
'''configuration_efficientnet''': [
'''EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientNetConfig''',
'''EfficientNetOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ['''EfficientNetImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
'''EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientNetForImageClassification''',
'''EfficientNetModel''',
'''EfficientNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 5 | 1 |
"""simple docstring"""
def lowercase__ ( snake_case_ :int = 10 , snake_case_ :int = 22 ):
__UpperCAmelCase = range(1 , snake_case_ )
__UpperCAmelCase = range(1 , snake_case_ )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(f"""{solution(10, 22) = }""")
| 352 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _UpperCAmelCase ( metaclass=_lowerCAmelCase ):
a__ : Union[str, Any] = ["onnx"]
def __init__( self : Any , *_lowercase : Dict , **_lowercase : Any ):
requires_backends(self , ['''onnx'''] )
@classmethod
def a ( cls : str , *_lowercase : List[Any] , **_lowercase : int ):
requires_backends(cls , ['''onnx'''] )
@classmethod
def a ( cls : Union[str, Any] , *_lowercase : List[str] , **_lowercase : Optional[int] ):
requires_backends(cls , ['''onnx'''] )
| 86 | 0 |
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
UpperCamelCase = True
except (ImportError, AttributeError):
UpperCamelCase = object
def lowercase_ ( *_lowerCamelCase : Union[str, Any] , **_lowerCamelCase : List[str]):
pass
UpperCamelCase = False
UpperCamelCase = logging.get_logger('''transformers-cli/serving''')
def lowercase_ ( _lowerCamelCase : Namespace):
lowercase__ : int = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
return ServeCommand(_lowerCamelCase , args.host , args.port , args.workers)
class snake_case_ ( __A ):
__A : dict
class snake_case_ ( __A ):
__A : List[str]
__A : Optional[List[int]]
class snake_case_ ( __A ):
__A : str
class snake_case_ ( __A ):
__A : Any
class snake_case_ ( __A ):
@staticmethod
def __UpperCamelCase ( lowercase_ : ArgumentParser ) -> Union[str, Any]:
lowercase__ : Union[str, Any] = parser.add_parser(
"serve" , help="CLI tool to run inference requests through REST and GraphQL endpoints." )
serve_parser.add_argument(
"--task" , type=lowercase_ , choices=get_supported_tasks() , help="The task to run the pipeline on" , )
serve_parser.add_argument("--host" , type=lowercase_ , default="localhost" , help="Interface the server will listen on." )
serve_parser.add_argument("--port" , type=lowercase_ , default=88_88 , help="Port the serving will listen to." )
serve_parser.add_argument("--workers" , type=lowercase_ , default=1 , help="Number of http workers" )
serve_parser.add_argument("--model" , type=lowercase_ , help="Model's name or path to stored model." )
serve_parser.add_argument("--config" , type=lowercase_ , help="Model's config name or path to stored model." )
serve_parser.add_argument("--tokenizer" , type=lowercase_ , help="Tokenizer name to use." )
serve_parser.add_argument(
"--device" , type=lowercase_ , default=-1 , help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)" , )
serve_parser.set_defaults(func=lowercase_ )
def __init__( self : int , lowercase_ : Pipeline , lowercase_ : str , lowercase_ : int , lowercase_ : int ) -> Dict:
lowercase__ : Dict = pipeline
lowercase__ : Any = host
lowercase__ : List[str] = port
lowercase__ : List[Any] = workers
if not _serve_dependencies_installed:
raise RuntimeError(
"Using serve command requires FastAPI and uvicorn. "
"Please install transformers with [serving]: pip install \"transformers[serving]\"."
"Or install FastAPI and uvicorn separately." )
else:
logger.info(F'''Serving model over {host}:{port}''' )
lowercase__ : Optional[Any] = FastAPI(
routes=[
APIRoute(
"/" , self.model_info , response_model=lowercase_ , response_class=lowercase_ , methods=["GET"] , ),
APIRoute(
"/tokenize" , self.tokenize , response_model=lowercase_ , response_class=lowercase_ , methods=["POST"] , ),
APIRoute(
"/detokenize" , self.detokenize , response_model=lowercase_ , response_class=lowercase_ , methods=["POST"] , ),
APIRoute(
"/forward" , self.forward , response_model=lowercase_ , response_class=lowercase_ , methods=["POST"] , ),
] , timeout=6_00 , )
def __UpperCamelCase ( self : int ) -> List[str]:
run(self._app , host=self.host , port=self.port , workers=self.workers )
def __UpperCamelCase ( self : List[Any] ) -> Optional[int]:
return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : str = Body(lowercase_ , embed=lowercase_ ) , lowercase_ : bool = Body(lowercase_ , embed=lowercase_ ) ) -> Optional[Any]:
try:
lowercase__ : Union[str, Any] = self._pipeline.tokenizer.tokenize(lowercase_ )
if return_ids:
lowercase__ : Union[str, Any] = self._pipeline.tokenizer.convert_tokens_to_ids(lowercase_ )
return ServeTokenizeResult(tokens=lowercase_ , tokens_ids=lowercase_ )
else:
return ServeTokenizeResult(tokens=lowercase_ )
except Exception as e:
raise HTTPException(status_code=5_00 , detail={"model": "", "error": str(lowercase_ )} )
def __UpperCamelCase ( self : Optional[int] , lowercase_ : List[int] = Body(lowercase_ , embed=lowercase_ ) , lowercase_ : bool = Body(lowercase_ , embed=lowercase_ ) , lowercase_ : bool = Body(lowercase_ , embed=lowercase_ ) , ) -> Optional[int]:
try:
lowercase__ : str = self._pipeline.tokenizer.decode(lowercase_ , lowercase_ , lowercase_ )
return ServeDeTokenizeResult(model="" , text=lowercase_ )
except Exception as e:
raise HTTPException(status_code=5_00 , detail={"model": "", "error": str(lowercase_ )} )
async def __UpperCamelCase ( self : Tuple , lowercase_ : int=Body(lowercase_ , embed=lowercase_ ) ) -> List[Any]:
# Check we don't have empty string
if len(lowercase_ ) == 0:
return ServeForwardResult(output=[] , attention=[] )
try:
# Forward through the model
lowercase__ : int = self._pipeline(lowercase_ )
return ServeForwardResult(output=lowercase_ )
except Exception as e:
raise HTTPException(5_00 , {"error": str(lowercase_ )} )
| 87 | import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class snake_case_ ( __A ):
__A : Optional[Any] = ["image_processor", "tokenizer"]
__A : Tuple = "LayoutLMv3ImageProcessor"
__A : List[Any] = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")
def __init__( self : Union[str, Any] , lowercase_ : int=None , lowercase_ : str=None , **lowercase_ : Optional[Any] ) -> Optional[int]:
lowercase__ : Union[str, Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , lowercase_ , )
lowercase__ : Optional[int] = kwargs.pop("feature_extractor" )
lowercase__ : int = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(lowercase_ , lowercase_ )
def __call__( self : Dict , lowercase_ : Optional[Any] , lowercase_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowercase_ : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , lowercase_ : Union[List[List[int]], List[List[List[int]]]] = None , lowercase_ : Optional[Union[List[int], List[List[int]]]] = None , lowercase_ : bool = True , lowercase_ : Union[bool, str, PaddingStrategy] = False , lowercase_ : Union[bool, str, TruncationStrategy] = None , lowercase_ : Optional[int] = None , lowercase_ : int = 0 , lowercase_ : Optional[int] = None , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = True , lowercase_ : Optional[Union[str, TensorType]] = None , **lowercase_ : Dict , ) -> BatchEncoding:
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True." )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
# first, apply the image processor
lowercase__ : Union[str, Any] = self.image_processor(images=lowercase_ , return_tensors=lowercase_ )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(lowercase_ , lowercase_ ):
lowercase__ : Optional[Any] = [text] # add batch dimension (as the image processor always adds a batch dimension)
lowercase__ : Any = features["words"]
lowercase__ : Tuple = self.tokenizer(
text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=lowercase_ , add_special_tokens=lowercase_ , padding=lowercase_ , truncation=lowercase_ , max_length=lowercase_ , stride=lowercase_ , pad_to_multiple_of=lowercase_ , return_token_type_ids=lowercase_ , return_attention_mask=lowercase_ , return_overflowing_tokens=lowercase_ , return_special_tokens_mask=lowercase_ , return_offsets_mapping=lowercase_ , return_length=lowercase_ , verbose=lowercase_ , return_tensors=lowercase_ , **lowercase_ , )
# add pixel values
lowercase__ : Optional[int] = features.pop("pixel_values" )
if return_overflowing_tokens is True:
lowercase__ : Dict = self.get_overflowing_images(lowercase_ , encoded_inputs["overflow_to_sample_mapping"] )
lowercase__ : str = images
return encoded_inputs
def __UpperCamelCase ( self : List[Any] , lowercase_ : Tuple , lowercase_ : List[Any] ) -> Dict:
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
lowercase__ : Tuple = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(lowercase_ ) != len(lowercase_ ):
raise ValueError(
"Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
F''' {len(lowercase_ )} and {len(lowercase_ )}''' )
return images_with_overflow
def __UpperCamelCase ( self : int , *lowercase_ : Union[str, Any] , **lowercase_ : List[str] ) -> Union[str, Any]:
return self.tokenizer.batch_decode(*lowercase_ , **lowercase_ )
def __UpperCamelCase ( self : Union[str, Any] , *lowercase_ : str , **lowercase_ : int ) -> Dict:
return self.tokenizer.decode(*lowercase_ , **lowercase_ )
@property
def __UpperCamelCase ( self : Any ) -> Any:
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def __UpperCamelCase ( self : Optional[int] ) -> Optional[Any]:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , lowercase_ , )
return self.image_processor_class
@property
def __UpperCamelCase ( self : List[Any] ) -> Tuple:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , lowercase_ , )
return self.image_processor
| 87 | 1 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
__lowerCamelCase = logging.get_logger(__name__)
@add_end_docstrings(A_ )
class UpperCAmelCase ( A_ ):
def __init__(self : int , **snake_case__ : Optional[Any] ) -> List[Any]:
'''simple docstring'''
super().__init__(**snake_case__ )
if self.framework == "tf":
raise ValueError(f"""The {self.__class__} is only available in PyTorch.""" )
requires_backends(self , "vision" )
self.check_model_type(snake_case__ )
def __call__(self : str , snake_case__ : Union[str, "Image.Image", List[Dict[str, Any]]] , snake_case__ : Union[str, List[str]] = None , **snake_case__ : Optional[Any] , ) -> Optional[int]:
'''simple docstring'''
if "text_queries" in kwargs:
snake_case : Tuple = kwargs.pop("text_queries" )
if isinstance(snake_case__ , (str, Image.Image) ):
snake_case : Dict = {"image": image, "candidate_labels": candidate_labels}
else:
snake_case : Tuple = image
snake_case : Dict = super().__call__(snake_case__ , **snake_case__ )
return results
def _SCREAMING_SNAKE_CASE (self : Dict , **snake_case__ : Tuple ) -> Tuple:
'''simple docstring'''
snake_case : Optional[int] = {}
if "threshold" in kwargs:
snake_case : Optional[Any] = kwargs["threshold"]
if "top_k" in kwargs:
snake_case : int = kwargs["top_k"]
return {}, {}, postprocess_params
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : Tuple ) -> int:
'''simple docstring'''
snake_case : int = load_image(inputs["image"] )
snake_case : int = inputs["candidate_labels"]
if isinstance(snake_case__ , snake_case__ ):
snake_case : int = candidate_labels.split("," )
snake_case : List[Any] = torch.tensor([[image.height, image.width]] , dtype=torch.intaa )
for i, candidate_label in enumerate(snake_case__ ):
snake_case : Any = self.tokenizer(snake_case__ , return_tensors=self.framework )
snake_case : str = self.image_processor(snake_case__ , return_tensors=self.framework )
yield {
"is_last": i == len(snake_case__ ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : Any ) -> int:
'''simple docstring'''
snake_case : Any = model_inputs.pop("target_size" )
snake_case : str = model_inputs.pop("candidate_label" )
snake_case : Dict = model_inputs.pop("is_last" )
snake_case : Optional[Any] = self.model(**snake_case__ )
snake_case : Tuple = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
return model_outputs
def _SCREAMING_SNAKE_CASE (self : Optional[Any] , snake_case__ : Dict , snake_case__ : List[str]=0.1 , snake_case__ : Any=None ) -> str:
'''simple docstring'''
snake_case : Dict = []
for model_output in model_outputs:
snake_case : List[str] = model_output["candidate_label"]
snake_case : Union[str, Any] = BaseModelOutput(snake_case__ )
snake_case : str = self.image_processor.post_process_object_detection(
outputs=snake_case__ , threshold=snake_case__ , target_sizes=model_output["target_size"] )[0]
for index in outputs["scores"].nonzero():
snake_case : str = outputs["scores"][index].item()
snake_case : Optional[int] = self._get_bounding_box(outputs["boxes"][index][0] )
snake_case : Tuple = {"score": score, "label": label, "box": box}
results.append(snake_case__ )
snake_case : int = sorted(snake_case__ , key=lambda snake_case__ : x["score"] , reverse=snake_case__ )
if top_k:
snake_case : Optional[Any] = results[:top_k]
return results
def _SCREAMING_SNAKE_CASE (self : Any , snake_case__ : "torch.Tensor" ) -> Dict[str, int]:
'''simple docstring'''
if self.framework != "pt":
raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch." )
snake_case : int = box.int().tolist()
snake_case : Dict = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox
| 363 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowerCamelCase = {
"""configuration_pix2struct""": [
"""PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Pix2StructConfig""",
"""Pix2StructTextConfig""",
"""Pix2StructVisionConfig""",
],
"""processing_pix2struct""": ["""Pix2StructProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["""Pix2StructImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
"""PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Pix2StructPreTrainedModel""",
"""Pix2StructForConditionalGeneration""",
"""Pix2StructVisionModel""",
"""Pix2StructTextModel""",
]
if TYPE_CHECKING:
from .configuration_pixastruct import (
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
PixaStructConfig,
PixaStructTextConfig,
PixaStructVisionConfig,
)
from .processing_pixastruct import PixaStructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_pixastruct import PixaStructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pixastruct import (
PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
PixaStructForConditionalGeneration,
PixaStructPreTrainedModel,
PixaStructTextModel,
PixaStructVisionModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 10 | 0 |
import operator
def UpperCAmelCase__ ( _A : list , _A : bool = False , _A : list | None = None ):
'''simple docstring'''
a__ =operator.lt if reverse else operator.gt
a__ =solution or []
if not arr:
return solution
a__ =[arr.pop(0 )]
for i, item in enumerate(_A ):
if _operator(_A , sublist[-1] ):
sublist.append(_A )
arr.pop(_A )
# merging sublist into solution list
if not solution:
solution.extend(_A )
else:
while sublist:
a__ =sublist.pop(0 )
for i, xx in enumerate(_A ):
if not _operator(_A , _A ):
solution.insert(_A , _A )
break
else:
solution.append(_A )
strand_sort(_A , _A , _A )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 188 |
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class __magic_name__ ( lowerCamelCase__ ):
'''simple docstring'''
def __init__( self, lowercase_, lowercase_=13, lowercase_=7, lowercase_=True, lowercase_=True, lowercase_=True, lowercase_=True, lowercase_=True, lowercase_=False, lowercase_=False, lowercase_=False, lowercase_=2, lowercase_=99, lowercase_=0, lowercase_=32, lowercase_=5, lowercase_=4, lowercase_=0.1, lowercase_=0.1, lowercase_=512, lowercase_=12, lowercase_=2, lowercase_=0.02, lowercase_=3, lowercase_=4, lowercase_="last", lowercase_=None, lowercase_=None, ) -> List[Any]:
"""simple docstring"""
a__ =parent
a__ =batch_size
a__ =seq_length
a__ =is_training
a__ =use_input_lengths
a__ =use_token_type_ids
a__ =use_labels
a__ =gelu_activation
a__ =sinusoidal_embeddings
a__ =causal
a__ =asm
a__ =n_langs
a__ =vocab_size
a__ =n_special
a__ =hidden_size
a__ =num_hidden_layers
a__ =num_attention_heads
a__ =hidden_dropout_prob
a__ =attention_probs_dropout_prob
a__ =max_position_embeddings
a__ =type_vocab_size
a__ =type_sequence_label_size
a__ =initializer_range
a__ =num_labels
a__ =num_choices
a__ =summary_type
a__ =use_proj
a__ =scope
def _UpperCAmelCase ( self ) -> Any:
"""simple docstring"""
a__ =ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
a__ =random_attention_mask([self.batch_size, self.seq_length] )
a__ =None
if self.use_input_lengths:
a__ =(
ids_tensor([self.batch_size], vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
a__ =None
if self.use_token_type_ids:
a__ =ids_tensor([self.batch_size, self.seq_length], self.n_langs )
a__ =None
a__ =None
a__ =None
if self.use_labels:
a__ =ids_tensor([self.batch_size], self.type_sequence_label_size )
a__ =ids_tensor([self.batch_size, self.seq_length], self.num_labels )
a__ =ids_tensor([self.batch_size], 2 ).float()
a__ =ids_tensor([self.batch_size], self.num_choices )
a__ =self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _UpperCAmelCase ( self ) -> Any:
"""simple docstring"""
return FlaubertConfig(
vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, )
def _UpperCAmelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, ) -> Dict:
"""simple docstring"""
a__ =FlaubertModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
a__ =model(lowercase_, lengths=lowercase_, langs=lowercase_ )
a__ =model(lowercase_, langs=lowercase_ )
a__ =model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, ) -> str:
"""simple docstring"""
a__ =FlaubertWithLMHeadModel(lowercase_ )
model.to(lowercase_ )
model.eval()
a__ =model(lowercase_, token_type_ids=lowercase_, labels=lowercase_ )
self.parent.assertEqual(result.loss.shape, () )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, ) -> Dict:
"""simple docstring"""
a__ =FlaubertForQuestionAnsweringSimple(lowercase_ )
model.to(lowercase_ )
model.eval()
a__ =model(lowercase_ )
a__ =model(lowercase_, start_positions=lowercase_, end_positions=lowercase_ )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def _UpperCAmelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, ) -> Optional[Any]:
"""simple docstring"""
a__ =FlaubertForQuestionAnswering(lowercase_ )
model.to(lowercase_ )
model.eval()
a__ =model(lowercase_ )
a__ =model(
lowercase_, start_positions=lowercase_, end_positions=lowercase_, cls_index=lowercase_, is_impossible=lowercase_, p_mask=lowercase_, )
a__ =model(
lowercase_, start_positions=lowercase_, end_positions=lowercase_, cls_index=lowercase_, is_impossible=lowercase_, )
((a__), ) =result_with_labels.to_tuple()
a__ =model(lowercase_, start_positions=lowercase_, end_positions=lowercase_ )
((a__), ) =result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape, () )
self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,) )
def _UpperCAmelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, ) -> Optional[Any]:
"""simple docstring"""
a__ =FlaubertForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
a__ =model(lowercase_ )
a__ =model(lowercase_, labels=lowercase_ )
self.parent.assertEqual(result.loss.shape, () )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def _UpperCAmelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, ) -> Optional[int]:
"""simple docstring"""
a__ =self.num_labels
a__ =FlaubertForTokenClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
a__ =model(lowercase_, attention_mask=lowercase_, labels=lowercase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCAmelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, ) -> Dict:
"""simple docstring"""
a__ =self.num_choices
a__ =FlaubertForMultipleChoice(config=lowercase_ )
model.to(lowercase_ )
model.eval()
a__ =input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
a__ =token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
a__ =input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
a__ =model(
lowercase_, attention_mask=lowercase_, token_type_ids=lowercase_, labels=lowercase_, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def _UpperCAmelCase ( self ) -> Dict:
"""simple docstring"""
a__ =self.prepare_config_and_inputs()
(
(
a__
), (
a__
), (
a__
), (
a__
), (
a__
), (
a__
), (
a__
), (
a__
), (
a__
),
) =config_and_inputs
a__ ={
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''lengths''': input_lengths,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : str = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : Dict = (
{
'feature-extraction': FlaubertModel,
'fill-mask': FlaubertWithLMHeadModel,
'question-answering': FlaubertForQuestionAnsweringSimple,
'text-classification': FlaubertForSequenceClassification,
'token-classification': FlaubertForTokenClassification,
'zero-shot': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def _UpperCAmelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_ ) -> str:
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _UpperCAmelCase ( self, lowercase_, lowercase_, lowercase_=False ) -> str:
"""simple docstring"""
a__ =super()._prepare_for_class(lowercase_, lowercase_, return_labels=lowercase_ )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
a__ =torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=lowercase_ )
a__ =torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=lowercase_ )
return inputs_dict
def _UpperCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
a__ =FlaubertModelTester(self )
a__ =ConfigTester(self, config_class=lowercase_, emb_dim=37 )
def _UpperCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Any:
"""simple docstring"""
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*lowercase_ )
def _UpperCAmelCase ( self ) -> str:
"""simple docstring"""
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*lowercase_ )
def _UpperCAmelCase ( self ) -> Dict:
"""simple docstring"""
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*lowercase_ )
def _UpperCAmelCase ( self ) -> Dict:
"""simple docstring"""
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*lowercase_ )
def _UpperCAmelCase ( self ) -> Any:
"""simple docstring"""
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*lowercase_ )
def _UpperCAmelCase ( self ) -> Any:
"""simple docstring"""
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*lowercase_ )
def _UpperCAmelCase ( self ) -> Tuple:
"""simple docstring"""
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*lowercase_ )
@slow
def _UpperCAmelCase ( self ) -> Tuple:
"""simple docstring"""
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ =FlaubertModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
@slow
@require_torch_gpu
def _UpperCAmelCase ( self ) -> int:
"""simple docstring"""
a__, a__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
a__ =True
a__ =model_class(config=lowercase_ )
a__ =self._prepare_for_class(lowercase_, lowercase_ )
a__ =torch.jit.trace(
lowercase_, (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(lowercase_, os.path.join(lowercase_, '''traced_model.pt''' ) )
a__ =torch.jit.load(os.path.join(lowercase_, '''traced_model.pt''' ), map_location=lowercase_ )
loaded(inputs_dict['''input_ids'''].to(lowercase_ ), inputs_dict['''attention_mask'''].to(lowercase_ ) )
@require_torch
class __magic_name__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _UpperCAmelCase ( self ) -> List[str]:
"""simple docstring"""
a__ =FlaubertModel.from_pretrained('''flaubert/flaubert_base_cased''' )
a__ =torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
with torch.no_grad():
a__ =model(lowercase_ )[0]
a__ =torch.Size((1, 11, 768) )
self.assertEqual(output.shape, lowercase_ )
a__ =torch.tensor(
[[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3], lowercase_, atol=1E-4 ) )
| 188 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase =logging.get_logger(__name__)
lowercase ={
'xlm-mlm-en-2048': 'https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json',
'xlm-mlm-ende-1024': 'https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json',
'xlm-mlm-enfr-1024': 'https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json',
'xlm-mlm-enro-1024': 'https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json',
'xlm-mlm-tlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json',
'xlm-mlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json',
'xlm-clm-enfr-1024': 'https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json',
'xlm-clm-ende-1024': 'https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json',
'xlm-mlm-17-1280': 'https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json',
'xlm-mlm-100-1280': 'https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json',
}
class __magic_name__ ( lowerCAmelCase ):
UpperCAmelCase ="xlm"
UpperCAmelCase ={
"hidden_size": "emb_dim",
"num_attention_heads": "n_heads",
"num_hidden_layers": "n_layers",
"n_words": "vocab_size", # For backward compatibility
}
def __init__( self , snake_case=3_0_1_4_5 , snake_case=2_0_4_8 , snake_case=1_2 , snake_case=1_6 , snake_case=0.1 , snake_case=0.1 , snake_case=True , snake_case=False , snake_case=False , snake_case=False , snake_case=1 , snake_case=True , snake_case=5_1_2 , snake_case=2_0_4_8**-0.5 , snake_case=1E-1_2 , snake_case=0.02 , snake_case=0 , snake_case=1 , snake_case=2 , snake_case=3 , snake_case=5 , snake_case=True , snake_case="first" , snake_case=True , snake_case=None , snake_case=True , snake_case=0.1 , snake_case=5 , snake_case=5 , snake_case=0 , snake_case=0 , snake_case=2 , snake_case=0 , **snake_case , ) -> int:
'''simple docstring'''
_UpperCAmelCase : int =vocab_size
_UpperCAmelCase : Optional[Any] =emb_dim
_UpperCAmelCase : Union[str, Any] =n_layers
_UpperCAmelCase : str =n_heads
_UpperCAmelCase : List[str] =dropout
_UpperCAmelCase : Optional[Any] =attention_dropout
_UpperCAmelCase : Tuple =gelu_activation
_UpperCAmelCase : Dict =sinusoidal_embeddings
_UpperCAmelCase : Optional[int] =causal
_UpperCAmelCase : Optional[Any] =asm
_UpperCAmelCase : Any =n_langs
_UpperCAmelCase : Dict =use_lang_emb
_UpperCAmelCase : Union[str, Any] =layer_norm_eps
_UpperCAmelCase : List[str] =bos_index
_UpperCAmelCase : int =eos_index
_UpperCAmelCase : Optional[int] =pad_index
_UpperCAmelCase : List[Any] =unk_index
_UpperCAmelCase : Optional[int] =mask_index
_UpperCAmelCase : str =is_encoder
_UpperCAmelCase : Dict =max_position_embeddings
_UpperCAmelCase : Optional[Any] =embed_init_std
_UpperCAmelCase : List[str] =init_std
_UpperCAmelCase : Tuple =summary_type
_UpperCAmelCase : Dict =summary_use_proj
_UpperCAmelCase : Dict =summary_activation
_UpperCAmelCase : Optional[Any] =summary_proj_to_labels
_UpperCAmelCase : Dict =summary_first_dropout
_UpperCAmelCase : Optional[int] =start_n_top
_UpperCAmelCase : Dict =end_n_top
_UpperCAmelCase : Tuple =mask_token_id
_UpperCAmelCase : Optional[int] =lang_id
if "n_words" in kwargs:
_UpperCAmelCase : Tuple =kwargs['n_words']
super().__init__(pad_token_id=snake_case , bos_token_id=snake_case , **snake_case)
class __magic_name__ ( lowerCAmelCase ):
@property
def lowerCAmelCase ( self) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
_UpperCAmelCase : str ={0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_UpperCAmelCase : List[str] ={0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
])
| 242 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class __magic_name__ ( unittest.TestCase ):
def lowerCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCAmelCase : Dict ='ZinengTang/tvlt-base'
_UpperCAmelCase : Dict =tempfile.mkdtemp()
def lowerCAmelCase ( self , **snake_case) -> Union[str, Any]:
'''simple docstring'''
return TvltImageProcessor.from_pretrained(self.checkpoint , **snake_case)
def lowerCAmelCase ( self , **snake_case) -> Dict:
'''simple docstring'''
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **snake_case)
def lowerCAmelCase ( self) -> Any:
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def lowerCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Any =self.get_image_processor()
_UpperCAmelCase : Optional[Any] =self.get_feature_extractor()
_UpperCAmelCase : str =TvltProcessor(image_processor=snake_case , feature_extractor=snake_case)
processor.save_pretrained(self.tmpdirname)
_UpperCAmelCase : str =TvltProcessor.from_pretrained(self.tmpdirname)
self.assertIsInstance(processor.feature_extractor , snake_case)
self.assertIsInstance(processor.image_processor , snake_case)
def lowerCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : str =self.get_image_processor()
_UpperCAmelCase : List[Any] =self.get_feature_extractor()
_UpperCAmelCase : str =TvltProcessor(image_processor=snake_case , feature_extractor=snake_case)
_UpperCAmelCase : Optional[int] =np.ones([1_2_0_0_0])
_UpperCAmelCase : str =feature_extractor(snake_case , return_tensors='np')
_UpperCAmelCase : Dict =processor(audio=snake_case , return_tensors='np')
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1E-2)
def lowerCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCAmelCase : Dict =self.get_image_processor()
_UpperCAmelCase : int =self.get_feature_extractor()
_UpperCAmelCase : int =TvltProcessor(image_processor=snake_case , feature_extractor=snake_case)
_UpperCAmelCase : Union[str, Any] =np.ones([3, 2_2_4, 2_2_4])
_UpperCAmelCase : Optional[Any] =image_processor(snake_case , return_tensors='np')
_UpperCAmelCase : List[Any] =processor(images=snake_case , return_tensors='np')
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1E-2)
def lowerCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCAmelCase : List[str] =self.get_image_processor()
_UpperCAmelCase : Dict =self.get_feature_extractor()
_UpperCAmelCase : Optional[int] =TvltProcessor(image_processor=snake_case , feature_extractor=snake_case)
_UpperCAmelCase : Optional[int] =np.ones([1_2_0_0_0])
_UpperCAmelCase : str =np.ones([3, 2_2_4, 2_2_4])
_UpperCAmelCase : Optional[int] =processor(audio=snake_case , images=snake_case)
self.assertListEqual(list(inputs.keys()) , ['audio_values', 'audio_mask', 'pixel_values', 'pixel_mask'])
# test if it raises when no input is passed
with pytest.raises(snake_case):
processor()
def lowerCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] =self.get_image_processor()
_UpperCAmelCase : Tuple =self.get_feature_extractor()
_UpperCAmelCase : Dict =TvltProcessor(image_processor=snake_case , feature_extractor=snake_case)
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='`processor` and `image_processor`+`feature_extractor` model input names do not match' , )
| 242 | 1 |
'''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
__snake_case =[
"""cross_validation.py""",
"""gradient_accumulation.py""",
"""local_sgd.py""",
"""multi_process_metrics.py""",
"""memory.py""",
"""automatic_gradient_accumulation.py""",
"""fsdp_with_peak_mem_tracking.py""",
"""deepspeed_with_config_support.py""",
"""megatron_lm_gpt_pretraining.py""",
]
class UpperCAmelCase_ ( unittest.TestCase ):
def __UpperCAmelCase ( self : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : bool , UpperCAmelCase__ : str = None , UpperCAmelCase__ : list = None ) -> Dict:
lowerCAmelCase = None
lowerCAmelCase = os.path.abspath(os.path.join('examples' , 'by_feature' ) )
lowerCAmelCase = os.path.abspath('examples' )
for item in os.listdir(UpperCAmelCase__ ):
if item not in EXCLUDE_EXAMPLES:
lowerCAmelCase = os.path.join(UpperCAmelCase__ , UpperCAmelCase__ )
if os.path.isfile(UpperCAmelCase__ ) and ".py" in item_path:
with self.subTest(
tested_script=UpperCAmelCase__ , feature_script=UpperCAmelCase__ , tested_section='main()' if parser_only else 'training_function()' , ):
lowerCAmelCase = compare_against_test(
os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = '\n'.join(UpperCAmelCase__ )
if special_strings is not None:
for string in special_strings:
lowerCAmelCase = diff.replace(UpperCAmelCase__ , '' )
self.assertEqual(UpperCAmelCase__ , '' )
def __UpperCAmelCase ( self : Tuple ) -> str:
self.one_complete_example('complete_nlp_example.py' , UpperCAmelCase__ )
self.one_complete_example('complete_nlp_example.py' , UpperCAmelCase__ )
def __UpperCAmelCase ( self : Union[str, Any] ) -> int:
lowerCAmelCase = os.path.abspath(os.path.join('examples' , 'cv_example.py' ) )
lowerCAmelCase = [
' ' * 1_6 + '{\n\n',
' ' * 2_0 + '"accuracy": eval_metric["accuracy"],\n\n',
' ' * 2_0 + '"f1": eval_metric["f1"],\n\n',
' ' * 2_0 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
' ' * 2_0 + '"epoch": epoch,\n\n',
' ' * 1_6 + '},\n\n',
' ' * 1_6 + 'step=epoch,\n',
' ' * 1_2,
' ' * 8 + 'for step, batch in enumerate(active_dataloader):\n',
]
self.one_complete_example('complete_cv_example.py' , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
self.one_complete_example('complete_cv_example.py' , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
@mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''1'''} )
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : Union[str, Any] = False
@classmethod
def __UpperCAmelCase ( cls : str ) -> str:
super().setUpClass()
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = os.path.join(cls._tmpdir , 'default_config.yml' )
write_basic_config(save_location=cls.configPath )
lowerCAmelCase = ['accelerate', 'launch', '--config_file', cls.configPath]
@classmethod
def __UpperCAmelCase ( cls : Tuple ) -> List[str]:
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def __UpperCAmelCase ( self : Any ) -> List[str]:
lowerCAmelCase = F'''
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
'''.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'epoch_0' ) ) )
def __UpperCAmelCase ( self : Dict ) -> List[Any]:
lowerCAmelCase = F'''
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
'''.split()
lowerCAmelCase = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'step_2' ) ) )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
lowerCAmelCase = F'''
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )}
'''.split()
lowerCAmelCase = run_command(self._launch_args + testargs , return_stdout=UpperCAmelCase__ )
self.assertNotIn('epoch 0:' , UpperCAmelCase__ )
self.assertIn('epoch 1:' , UpperCAmelCase__ )
def __UpperCAmelCase ( self : int ) -> Any:
lowerCAmelCase = F'''
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )}
'''.split()
lowerCAmelCase = run_command(self._launch_args + testargs , return_stdout=UpperCAmelCase__ )
if torch.cuda.is_available():
lowerCAmelCase = torch.cuda.device_count()
else:
lowerCAmelCase = 1
if num_processes > 1:
self.assertNotIn('epoch 0:' , UpperCAmelCase__ )
self.assertIn('epoch 1:' , UpperCAmelCase__ )
else:
self.assertIn('epoch 0:' , UpperCAmelCase__ )
self.assertIn('epoch 1:' , UpperCAmelCase__ )
@slow
def __UpperCAmelCase ( self : Dict ) -> Tuple:
lowerCAmelCase = '\n examples/by_feature/cross_validation.py\n --num_folds 2\n '.split()
with mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '0'} ):
lowerCAmelCase = run_command(self._launch_args + testargs , return_stdout=UpperCAmelCase__ )
lowerCAmelCase = re.findall('({.+})' , UpperCAmelCase__ )
lowerCAmelCase = [r for r in results if 'accuracy' in r][-1]
lowerCAmelCase = ast.literal_eval(UpperCAmelCase__ )
self.assertGreaterEqual(results['accuracy'] , 0.75 )
def __UpperCAmelCase ( self : Optional[Any] ) -> List[Any]:
lowerCAmelCase = ['examples/by_feature/multi_process_metrics.py']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdir:
lowerCAmelCase = F'''
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
'''.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase__ , 'tracking' ) ) )
def __UpperCAmelCase ( self : Any ) -> Union[str, Any]:
lowerCAmelCase = ['examples/by_feature/gradient_accumulation.py']
run_command(self._launch_args + testargs )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
lowerCAmelCase = ['examples/by_feature/local_sgd.py']
run_command(self._launch_args + testargs )
| 4 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase = {
"""configuration_mctct""": ["""MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MCTCTConfig"""],
"""feature_extraction_mctct""": ["""MCTCTFeatureExtractor"""],
"""processing_mctct""": ["""MCTCTProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = [
"""MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MCTCTForCTC""",
"""MCTCTModel""",
"""MCTCTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 166 | 0 |
'''simple docstring'''
from bisect import bisect
from itertools import accumulate
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> str:
__lowerCamelCase = sorted(zip(UpperCamelCase__ , UpperCamelCase__ ) , key=lambda UpperCamelCase__ : x[0] / x[1] , reverse=UpperCamelCase__ )
__lowerCamelCase , __lowerCamelCase = [i[0] for i in r], [i[1] for i in r]
__lowerCamelCase = list(accumulate(UpperCamelCase__ ) )
__lowerCamelCase = bisect(UpperCamelCase__ , UpperCamelCase__ )
return (
0
if k == 0
else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k] )
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 237 | '''simple docstring'''
__UpperCAmelCase ="ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def __lowerCAmelCase ( ) -> None:
__lowerCamelCase = input('''Enter message: ''' )
__lowerCamelCase = input('''Enter key [alphanumeric]: ''' )
__lowerCamelCase = input('''Encrypt/Decrypt [e/d]: ''' )
if mode.lower().startswith('''e''' ):
__lowerCamelCase = '''encrypt'''
__lowerCamelCase = encrypt_message(UpperCamelCase__ , UpperCamelCase__ )
elif mode.lower().startswith('''d''' ):
__lowerCamelCase = '''decrypt'''
__lowerCamelCase = decrypt_message(UpperCamelCase__ , UpperCamelCase__ )
print(f"""\n{mode.title()}ed message:""" )
print(UpperCamelCase__ )
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> str:
return translate_message(UpperCamelCase__ , UpperCamelCase__ , '''encrypt''' )
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> str:
return translate_message(UpperCamelCase__ , UpperCamelCase__ , '''decrypt''' )
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> str:
__lowerCamelCase = []
__lowerCamelCase = 0
__lowerCamelCase = key.upper()
for symbol in message:
__lowerCamelCase = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(UpperCamelCase__ )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(UpperCamelCase__ ):
__lowerCamelCase = 0
else:
translated.append(UpperCamelCase__ )
return "".join(UpperCamelCase__ )
if __name__ == "__main__":
main()
| 237 | 1 |
from math import factorial, pi
def a__ ( snake_case , snake_case = 30 ):
"""simple docstring"""
if not isinstance(lowerCAmelCase__ , (int, float) ):
raise ValueError('''maclaurin_sin() requires either an int or float for theta''' )
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or accuracy <= 0:
raise ValueError('''maclaurin_sin() requires a positive int for accuracy''' )
__SCREAMING_SNAKE_CASE : Dict = float(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = theta // (2 * pi)
theta -= 2 * div * pi
return sum(
(-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(lowerCAmelCase__ ) )
def a__ ( snake_case , snake_case = 30 ):
"""simple docstring"""
if not isinstance(lowerCAmelCase__ , (int, float) ):
raise ValueError('''maclaurin_cos() requires either an int or float for theta''' )
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or accuracy <= 0:
raise ValueError('''maclaurin_cos() requires a positive int for accuracy''' )
__SCREAMING_SNAKE_CASE : int = float(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = theta // (2 * pi)
theta -= 2 * div * pi
return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(lowerCAmelCase__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
| 303 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
__a = inspect.getfile(accelerate.test_utils )
__a = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] )
__a = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_distributed_data_loop.py'''] )
__a = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_ops.py'''] )
@require_multi_gpu
def __UpperCAmelCase ( self ):
print(f'''Found {torch.cuda.device_count()} devices.''' )
__a = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_a , env=os.environ.copy() )
@require_multi_gpu
def __UpperCAmelCase ( self ):
print(f'''Found {torch.cuda.device_count()} devices.''' )
__a = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path]
print(f'''Command: {cmd}''' )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_a , env=os.environ.copy() )
@require_multi_gpu
def __UpperCAmelCase ( self ):
__a = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_a , env=os.environ.copy() )
@require_multi_gpu
def __UpperCAmelCase ( self ):
print(f'''Found {torch.cuda.device_count()} devices, using 2 devices only''' )
__a = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices='''0,1''' ):
execute_subprocess_async(_a , env=os.environ.copy() )
if __name__ == "__main__":
lowercase_ = Accelerator()
lowercase_ = (accelerator.state.process_index + 2, 1_0)
lowercase_ = torch.randint(0, 1_0, shape).to(accelerator.device)
lowercase_ = ""
lowercase_ = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
lowercase_ = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
lowercase_ = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 45 | 0 |
"""simple docstring"""
def lowerCamelCase (a_ :Dict , a_ :Any) -> str:
if not (isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase)):
raise ValueError('''longest_common_substring() takes two strings for inputs''')
lowercase :Union[str, Any] = len(__UpperCAmelCase)
lowercase :Optional[Any] = len(__UpperCAmelCase)
lowercase :Tuple = [[0] * (texta_length + 1) for _ in range(texta_length + 1)]
lowercase :Optional[Any] = 0
lowercase :str = 0
for i in range(1 , texta_length + 1):
for j in range(1 , texta_length + 1):
if texta[i - 1] == texta[j - 1]:
lowercase :Optional[int] = 1 + dp[i - 1][j - 1]
if dp[i][j] > ans_length:
lowercase :List[str] = i
lowercase :Tuple = dp[i][j]
return texta[ans_index - ans_length : ans_index]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 356 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
'''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''',
}
class __magic_name__ ( __UpperCAmelCase ):
__A : Tuple = "layoutlmv3"
def __init__( self : int , snake_case__ : Any=5_0_2_6_5 , snake_case__ : int=7_6_8 , snake_case__ : Dict=1_2 , snake_case__ : Optional[Any]=1_2 , snake_case__ : Union[str, Any]=3_0_7_2 , snake_case__ : Tuple="gelu" , snake_case__ : List[str]=0.1 , snake_case__ : List[str]=0.1 , snake_case__ : int=5_1_2 , snake_case__ : int=2 , snake_case__ : Optional[int]=0.02 , snake_case__ : Union[str, Any]=1e-5 , snake_case__ : Optional[int]=1 , snake_case__ : Any=0 , snake_case__ : Optional[int]=2 , snake_case__ : int=1_0_2_4 , snake_case__ : str=1_2_8 , snake_case__ : Tuple=1_2_8 , snake_case__ : Optional[Any]=True , snake_case__ : Union[str, Any]=3_2 , snake_case__ : Any=1_2_8 , snake_case__ : List[Any]=6_4 , snake_case__ : List[Any]=2_5_6 , snake_case__ : Any=True , snake_case__ : Optional[Any]=True , snake_case__ : Tuple=True , snake_case__ : List[Any]=2_2_4 , snake_case__ : Optional[int]=3 , snake_case__ : Union[str, Any]=1_6 , snake_case__ : str=None , **snake_case__ : List[str] , ):
'''simple docstring'''
super().__init__(
vocab_size=snake_case__ , hidden_size=snake_case__ , num_hidden_layers=snake_case__ , num_attention_heads=snake_case__ , intermediate_size=snake_case__ , hidden_act=snake_case__ , hidden_dropout_prob=snake_case__ , attention_probs_dropout_prob=snake_case__ , max_position_embeddings=snake_case__ , type_vocab_size=snake_case__ , initializer_range=snake_case__ , layer_norm_eps=snake_case__ , pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ , )
lowercase :Optional[int] = max_ad_position_embeddings
lowercase :Tuple = coordinate_size
lowercase :Any = shape_size
lowercase :Union[str, Any] = has_relative_attention_bias
lowercase :Optional[Any] = rel_pos_bins
lowercase :Tuple = max_rel_pos
lowercase :Any = has_spatial_attention_bias
lowercase :Any = rel_ad_pos_bins
lowercase :str = max_rel_ad_pos
lowercase :int = text_embed
lowercase :Optional[int] = visual_embed
lowercase :str = input_size
lowercase :List[str] = num_channels
lowercase :str = patch_size
lowercase :Any = classifier_dropout
class __magic_name__ ( __UpperCAmelCase ):
__A : Tuple = version.parse("1.12" )
@property
def __snake_case ( self : Any ):
'''simple docstring'''
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
else:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels'''}),
] )
@property
def __snake_case ( self : int ):
'''simple docstring'''
return 1e-5
@property
def __snake_case ( self : Union[str, Any] ):
'''simple docstring'''
return 1_2
def __snake_case ( self : str , snake_case__ : "ProcessorMixin" , snake_case__ : int = -1 , snake_case__ : int = -1 , snake_case__ : bool = False , snake_case__ : Optional["TensorType"] = None , snake_case__ : int = 3 , snake_case__ : int = 4_0 , snake_case__ : int = 4_0 , ):
'''simple docstring'''
setattr(processor.image_processor , '''apply_ocr''' , snake_case__ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowercase :Dict = compute_effective_axis_dimension(
snake_case__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowercase :Union[str, Any] = processor.tokenizer.num_special_tokens_to_add(snake_case__ )
lowercase :List[str] = compute_effective_axis_dimension(
snake_case__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=snake_case__ )
# Generate dummy inputs according to compute batch and sequence
lowercase :Tuple = [[''' '''.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
lowercase :List[str] = [[[4_8, 8_4, 7_3, 1_2_8]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
lowercase :List[Any] = self._generate_dummy_images(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
lowercase :Dict = dict(
processor(
snake_case__ , text=snake_case__ , boxes=snake_case__ , return_tensors=snake_case__ , ) )
return inputs
| 172 | 0 |
"""simple docstring"""
from __future__ import annotations
def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ , ):
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif electron_conc < 0:
raise ValueError('Electron concentration cannot be negative in a semiconductor' )
elif hole_conc < 0:
raise ValueError('Hole concentration cannot be negative in a semiconductor' )
elif intrinsic_conc < 0:
raise ValueError(
'Intrinsic concentration cannot be negative in a semiconductor' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 78 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A ={'''configuration_wavlm''': ['''WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WavLMConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WavLMForAudioFrameClassification''',
'''WavLMForCTC''',
'''WavLMForSequenceClassification''',
'''WavLMForXVector''',
'''WavLMModel''',
'''WavLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
__A =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 19 | 0 |
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
a =(
"""This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate """
"""library. You can have a look at this example script for pointers: """
"""https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"""
)
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Optional[int]:
warnings.warn(lowerCamelCase__ , lowerCamelCase__ )
requires_backends(lowerCamelCase__ , 'sklearn' )
return (preds == labels).mean()
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Optional[int]:
warnings.warn(lowerCamelCase__ , lowerCamelCase__ )
requires_backends(lowerCamelCase__ , 'sklearn' )
__lowerCamelCase : Tuple = simple_accuracy(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase : Optional[int] = fa_score(y_true=lowerCamelCase__ , y_pred=lowerCamelCase__ )
return {
"acc": acc,
"f1": fa,
"acc_and_f1": (acc + fa) / 2,
}
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> str:
warnings.warn(lowerCamelCase__ , lowerCamelCase__ )
requires_backends(lowerCamelCase__ , 'sklearn' )
__lowerCamelCase : str = pearsonr(lowerCamelCase__ , lowerCamelCase__ )[0]
__lowerCamelCase : str = spearmanr(lowerCamelCase__ , lowerCamelCase__ )[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Tuple:
warnings.warn(lowerCamelCase__ , lowerCamelCase__ )
requires_backends(lowerCamelCase__ , 'sklearn' )
assert len(lowerCamelCase__ ) == len(lowerCamelCase__ ), F"Predictions and labels have mismatched lengths {len(lowerCamelCase__ )} and {len(lowerCamelCase__ )}"
if task_name == "cola":
return {"mcc": matthews_corrcoef(lowerCamelCase__ , lowerCamelCase__ )}
elif task_name == "sst-2":
return {"acc": simple_accuracy(lowerCamelCase__ , lowerCamelCase__ )}
elif task_name == "mrpc":
return acc_and_fa(lowerCamelCase__ , lowerCamelCase__ )
elif task_name == "sts-b":
return pearson_and_spearman(lowerCamelCase__ , lowerCamelCase__ )
elif task_name == "qqp":
return acc_and_fa(lowerCamelCase__ , lowerCamelCase__ )
elif task_name == "mnli":
return {"mnli/acc": simple_accuracy(lowerCamelCase__ , lowerCamelCase__ )}
elif task_name == "mnli-mm":
return {"mnli-mm/acc": simple_accuracy(lowerCamelCase__ , lowerCamelCase__ )}
elif task_name == "qnli":
return {"acc": simple_accuracy(lowerCamelCase__ , lowerCamelCase__ )}
elif task_name == "rte":
return {"acc": simple_accuracy(lowerCamelCase__ , lowerCamelCase__ )}
elif task_name == "wnli":
return {"acc": simple_accuracy(lowerCamelCase__ , lowerCamelCase__ )}
elif task_name == "hans":
return {"acc": simple_accuracy(lowerCamelCase__ , lowerCamelCase__ )}
else:
raise KeyError(lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
warnings.warn(lowerCamelCase__ , lowerCamelCase__ )
requires_backends(lowerCamelCase__ , 'sklearn' )
if len(lowerCamelCase__ ) != len(lowerCamelCase__ ):
raise ValueError(F"Predictions and labels have mismatched lengths {len(lowerCamelCase__ )} and {len(lowerCamelCase__ )}" )
if task_name == "xnli":
return {"acc": simple_accuracy(lowerCamelCase__ , lowerCamelCase__ )}
else:
raise KeyError(lowerCamelCase__ )
| 113 |
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
a =3
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> int:
print('Generating primitive root of p' )
while True:
__lowerCamelCase : Tuple = random.randrange(3 , lowerCamelCase__ )
if pow(lowerCamelCase__ , 2 , lowerCamelCase__ ) == 1:
continue
if pow(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) == 1:
continue
return g
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
print('Generating prime p...' )
__lowerCamelCase : List[str] = rabin_miller.generate_large_prime(lowerCamelCase__ ) # select large prime number.
__lowerCamelCase : Dict = primitive_root(lowerCamelCase__ ) # one primitive root on modulo p.
__lowerCamelCase : Optional[int] = random.randrange(3 , lowerCamelCase__ ) # private_key -> have to be greater than 2 for safety.
__lowerCamelCase : List[Any] = cryptomath.find_mod_inverse(pow(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ )
__lowerCamelCase : int = (key_size, e_a, e_a, p)
__lowerCamelCase : str = (key_size, d)
return public_key, private_key
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> None:
if os.path.exists(F"{name}_pubkey.txt" ) or os.path.exists(F"{name}_privkey.txt" ):
print('\nWARNING:' )
print(
F"\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"
'Use a different name or delete these files and re-run this program.' )
sys.exit()
__lowerCamelCase , __lowerCamelCase : List[Any] = generate_key(lowerCamelCase__ )
print(F"\nWriting public key to file {name}_pubkey.txt..." )
with open(F"{name}_pubkey.txt" , 'w' ) as fo:
fo.write(F"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}" )
print(F"Writing private key to file {name}_privkey.txt..." )
with open(F"{name}_privkey.txt" , 'w' ) as fo:
fo.write(F"{private_key[0]},{private_key[1]}" )
def SCREAMING_SNAKE_CASE__ ( ) -> None:
print('Making key files...' )
make_key_files('elgamal' , 2_0_4_8 )
print('Key files generation successful' )
if __name__ == "__main__":
main()
| 113 | 1 |
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
snake_case__ : Union[str, Any] = datasets.utils.logging.get_logger(__name__)
snake_case__ : Optional[int] = ['''names''', '''prefix''']
snake_case__ : Any = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols''']
snake_case__ : Any = ['''encoding_errors''', '''on_bad_lines''']
snake_case__ : Tuple = ['''date_format''']
@dataclass
class snake_case_( datasets.BuilderConfig ):
__UpperCamelCase = ","
__UpperCamelCase = None
__UpperCamelCase = "infer"
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = True
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = False
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = False
__UpperCamelCase = True
__UpperCamelCase = None
__UpperCamelCase = "."
__UpperCamelCase = None
__UpperCamelCase = '"'
__UpperCamelCase = 0
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = 0
__UpperCamelCase = True
__UpperCamelCase = False
__UpperCamelCase = None
__UpperCamelCase = 10_000
__UpperCamelCase = None
__UpperCamelCase = "strict"
__UpperCamelCase = "error"
__UpperCamelCase = None
def lowerCamelCase__ ( self : Union[str, Any] ):
if self.delimiter is not None:
lowerCAmelCase : Dict = self.delimiter
if self.column_names is not None:
lowerCAmelCase : Dict = self.column_names
@property
def lowerCamelCase__ ( self : Any ):
lowerCAmelCase : Any = {
'''sep''': self.sep,
'''header''': self.header,
'''names''': self.names,
'''index_col''': self.index_col,
'''usecols''': self.usecols,
'''prefix''': self.prefix,
'''mangle_dupe_cols''': self.mangle_dupe_cols,
'''engine''': self.engine,
'''converters''': self.converters,
'''true_values''': self.true_values,
'''false_values''': self.false_values,
'''skipinitialspace''': self.skipinitialspace,
'''skiprows''': self.skiprows,
'''nrows''': self.nrows,
'''na_values''': self.na_values,
'''keep_default_na''': self.keep_default_na,
'''na_filter''': self.na_filter,
'''verbose''': self.verbose,
'''skip_blank_lines''': self.skip_blank_lines,
'''thousands''': self.thousands,
'''decimal''': self.decimal,
'''lineterminator''': self.lineterminator,
'''quotechar''': self.quotechar,
'''quoting''': self.quoting,
'''escapechar''': self.escapechar,
'''comment''': self.comment,
'''encoding''': self.encoding,
'''dialect''': self.dialect,
'''error_bad_lines''': self.error_bad_lines,
'''warn_bad_lines''': self.warn_bad_lines,
'''skipfooter''': self.skipfooter,
'''doublequote''': self.doublequote,
'''memory_map''': self.memory_map,
'''float_precision''': self.float_precision,
'''chunksize''': self.chunksize,
'''encoding_errors''': self.encoding_errors,
'''on_bad_lines''': self.on_bad_lines,
'''date_format''': self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , UpperCamelCase_ ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class snake_case_( datasets.ArrowBasedBuilder ):
__UpperCamelCase = CsvConfig
def lowerCamelCase__ ( self : List[str] ):
return datasets.DatasetInfo(features=self.config.features )
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : List[str] ):
if not self.config.data_files:
raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
lowerCAmelCase : Union[str, Any] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(UpperCamelCase_ , (str, list, tuple) ):
lowerCAmelCase : Dict = data_files
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
lowerCAmelCase : Tuple = [files]
lowerCAmelCase : List[Any] = [dl_manager.iter_files(UpperCamelCase_ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
lowerCAmelCase : Tuple = []
for split_name, files in data_files.items():
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
lowerCAmelCase : Optional[int] = [files]
lowerCAmelCase : Dict = [dl_manager.iter_files(UpperCamelCase_ ) for file in files]
splits.append(datasets.SplitGenerator(name=UpperCamelCase_ , gen_kwargs={'''files''': files} ) )
return splits
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : pa.Table ):
if self.config.features is not None:
lowerCAmelCase : Any = self.config.features.arrow_schema
if all(not require_storage_cast(UpperCamelCase_ ) for feature in self.config.features.values() ):
# cheaper cast
lowerCAmelCase : Union[str, Any] = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=UpperCamelCase_ )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
lowerCAmelCase : Any = table_cast(UpperCamelCase_ , UpperCamelCase_ )
return pa_table
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : str ):
lowerCAmelCase : Tuple = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
lowerCAmelCase : Dict = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(UpperCamelCase_ ) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCamelCase_ ) ):
lowerCAmelCase : List[Any] = pd.read_csv(UpperCamelCase_ , iterator=UpperCamelCase_ , dtype=UpperCamelCase_ , **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(UpperCamelCase_ ):
lowerCAmelCase : List[Any] = pa.Table.from_pandas(UpperCamelCase_ )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(UpperCamelCase_ )
except ValueError as e:
logger.error(F'''Failed to read file \'{file}\' with error {type(UpperCamelCase_ )}: {e}''' )
raise
| 60 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
UpperCAmelCase : Any = [
'EAGER',
'AOT_EAGER',
'INDUCTOR',
'NVFUSER',
'AOT_NVFUSER',
'AOT_CUDAGRAPHS',
'OFI',
'FX2TRT',
'ONNXRT',
'IPEX',
]
def a__ ( a__ , a__=None , a__=None , a__=None ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = True
while ask_again:
__SCREAMING_SNAKE_CASE = input(a__ )
try:
if default is not None and len(a__ ) == 0:
return default
return convert_value(a__ ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(a__ )
def a__ ( a__ , a__=[] , a__=None , a__=0 ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = BulletMenu(a__ , a__ )
__SCREAMING_SNAKE_CASE = menu.run(default_choice=a__ )
return convert_value(a__ ) if convert_value is not None else result
def a__ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = int(a__ )
return ComputeEnvironment(["""LOCAL_MACHINE""", """AMAZON_SAGEMAKER"""][value] )
def a__ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = int(a__ )
return DistributedType(["""NO""", """MULTI_CPU""", """MULTI_XPU""", """MULTI_GPU""", """MULTI_NPU""", """TPU"""][value] )
def a__ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = int(a__ )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def a__ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = int(a__ )
return PrecisionType(["""no""", """fp16""", """bf16""", """fp8"""][value] )
def a__ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = int(a__ )
return SageMakerDistributedType(["""NO""", """DATA_PARALLEL""", """MODEL_PARALLEL"""][value] )
def a__ ( a__ ):
"""simple docstring"""
return {"yes": True, "no": False}[value.lower()]
class lowerCAmelCase__ ( argparse.RawDescriptionHelpFormatter ):
"""simple docstring"""
def UpperCAmelCase__ ( self : str , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = super()._format_usage(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = usage.replace("""<command> [<args>] """ , """""" )
return usage
| 267 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ =logging.get_logger(__name__)
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase=False ):
_SCREAMING_SNAKE_CASE : int = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_SCREAMING_SNAKE_CASE : Tuple = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase=False ):
for i in range(config.num_hidden_layers ):
if base_model:
_SCREAMING_SNAKE_CASE : Optional[Any] = ''
else:
_SCREAMING_SNAKE_CASE : List[str] = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_SCREAMING_SNAKE_CASE : Tuple = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
_SCREAMING_SNAKE_CASE : List[Any] = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_SCREAMING_SNAKE_CASE : Any = in_proj_weight[
: config.hidden_size, :
]
_SCREAMING_SNAKE_CASE : int = in_proj_bias[: config.hidden_size]
_SCREAMING_SNAKE_CASE : int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_SCREAMING_SNAKE_CASE : List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_SCREAMING_SNAKE_CASE : Optional[Any] = in_proj_weight[
-config.hidden_size :, :
]
_SCREAMING_SNAKE_CASE : List[str] = in_proj_bias[-config.hidden_size :]
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Dict = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(UpperCAmelCase_, UpperCAmelCase_ )
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Optional[int] = dct.pop(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE : Optional[int] = val
def lowerCamelCase__ ():
_SCREAMING_SNAKE_CASE : Tuple = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_SCREAMING_SNAKE_CASE : int = Image.open(requests.get(UpperCAmelCase_, stream=UpperCAmelCase_ ).raw )
return im
@torch.no_grad()
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase=True ):
_SCREAMING_SNAKE_CASE : Optional[Any] = ViTConfig()
# patch_size
if model_name[-1] == "8":
_SCREAMING_SNAKE_CASE : Optional[Any] = 8
# set labels if required
if not base_model:
_SCREAMING_SNAKE_CASE : Any = 1000
_SCREAMING_SNAKE_CASE : List[str] = 'huggingface/label-files'
_SCREAMING_SNAKE_CASE : Tuple = 'imagenet-1k-id2label.json'
_SCREAMING_SNAKE_CASE : int = json.load(open(hf_hub_download(UpperCAmelCase_, UpperCAmelCase_, repo_type="dataset" ), "r" ) )
_SCREAMING_SNAKE_CASE : Union[str, Any] = {int(UpperCAmelCase_ ): v for k, v in idalabel.items()}
_SCREAMING_SNAKE_CASE : Union[str, Any] = idalabel
_SCREAMING_SNAKE_CASE : Union[str, Any] = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
_SCREAMING_SNAKE_CASE : Tuple = 384
_SCREAMING_SNAKE_CASE : Optional[int] = 1536
_SCREAMING_SNAKE_CASE : Dict = 12
_SCREAMING_SNAKE_CASE : Tuple = 6
# load original model from torch hub
_SCREAMING_SNAKE_CASE : List[str] = torch.hub.load("facebookresearch/dino:main", UpperCAmelCase_ )
original_model.eval()
# load state_dict of original model, remove and rename some keys
_SCREAMING_SNAKE_CASE : Tuple = original_model.state_dict()
if base_model:
remove_classification_head_(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE : Any = create_rename_keys(UpperCAmelCase_, base_model=UpperCAmelCase_ )
for src, dest in rename_keys:
rename_key(UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ )
read_in_q_k_v(UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ )
# load HuggingFace model
if base_model:
_SCREAMING_SNAKE_CASE : Optional[int] = ViTModel(UpperCAmelCase_, add_pooling_layer=UpperCAmelCase_ ).eval()
else:
_SCREAMING_SNAKE_CASE : int = ViTForImageClassification(UpperCAmelCase_ ).eval()
model.load_state_dict(UpperCAmelCase_ )
# Check outputs on an image, prepared by ViTImageProcessor
_SCREAMING_SNAKE_CASE : Dict = ViTImageProcessor()
_SCREAMING_SNAKE_CASE : List[str] = image_processor(images=prepare_img(), return_tensors="pt" )
_SCREAMING_SNAKE_CASE : List[Any] = encoding['pixel_values']
_SCREAMING_SNAKE_CASE : List[Any] = model(UpperCAmelCase_ )
if base_model:
_SCREAMING_SNAKE_CASE : int = original_model(UpperCAmelCase_ )
assert torch.allclose(UpperCAmelCase_, outputs.last_hidden_state[:, 0, :], atol=1e-1 )
else:
_SCREAMING_SNAKE_CASE : List[Any] = original_model(UpperCAmelCase_ )
assert logits.shape == outputs.logits.shape
assert torch.allclose(UpperCAmelCase_, outputs.logits, atol=1e-3 )
Path(UpperCAmelCase_ ).mkdir(exist_ok=UpperCAmelCase_ )
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(UpperCAmelCase_ )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(UpperCAmelCase_ )
if __name__ == "__main__":
UpperCamelCase__ =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
UpperCamelCase__ =parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model) | 357 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
UpperCamelCase__ ='src/diffusers'
UpperCamelCase__ ='.'
# This is to make sure the diffusers module imported is the one in the repo.
UpperCamelCase__ =importlib.util.spec_from_file_location(
'diffusers',
os.path.join(DIFFUSERS_PATH, '__init__.py'),
submodule_search_locations=[DIFFUSERS_PATH],
)
UpperCamelCase__ =spec.loader.load_module()
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
return line.startswith(__lowerCamelCase ) or len(__lowerCamelCase ) <= 1 or re.search(R"^\s*\)(\s*->.*:|:)\s*$", __lowerCamelCase ) is not None
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Any = object_name.split("." )
_SCREAMING_SNAKE_CASE : List[Any] = 0
# First let's find the module where our object lives.
_SCREAMING_SNAKE_CASE : Any = parts[i]
while i < len(__lowerCamelCase ) and not os.path.isfile(os.path.join(__lowerCamelCase, f"""{module}.py""" ) ):
i += 1
if i < len(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Any = os.path.join(__lowerCamelCase, parts[i] )
if i >= len(__lowerCamelCase ):
raise ValueError(f"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" )
with open(os.path.join(__lowerCamelCase, f"""{module}.py""" ), "r", encoding="utf-8", newline="\n" ) as f:
_SCREAMING_SNAKE_CASE : int = f.readlines()
# Now let's find the class / func in the code!
_SCREAMING_SNAKE_CASE : Union[str, Any] = ""
_SCREAMING_SNAKE_CASE : Union[str, Any] = 0
for name in parts[i + 1 :]:
while (
line_index < len(__lowerCamelCase ) and re.search(Rf"""^{indent}(class|def)\s+{name}(\(|\:)""", lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(__lowerCamelCase ):
raise ValueError(f""" {object_name} does not match any function or class in {module}.""" )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
_SCREAMING_SNAKE_CASE : Optional[int] = line_index
while line_index < len(__lowerCamelCase ) and _should_continue(lines[line_index], __lowerCamelCase ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
_SCREAMING_SNAKE_CASE : Optional[int] = lines[start_index:line_index]
return "".join(__lowerCamelCase )
UpperCamelCase__ =re.compile(R'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)')
UpperCamelCase__ =re.compile(R'^\s*(\S+)->(\S+)(\s+.*|$)')
UpperCamelCase__ =re.compile(R'<FILL\s+[^>]*>')
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = code.split("\n" )
_SCREAMING_SNAKE_CASE : Union[str, Any] = 0
while idx < len(__lowerCamelCase ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(__lowerCamelCase ):
return re.search(R"^(\s*)\S", lines[idx] ).groups()[0]
return ""
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Optional[int] = len(get_indent(__lowerCamelCase ) ) > 0
if has_indent:
_SCREAMING_SNAKE_CASE : Union[str, Any] = f"""class Bla:\n{code}"""
_SCREAMING_SNAKE_CASE : Any = black.Mode(target_versions={black.TargetVersion.PYaa}, line_length=119, preview=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = black.format_str(__lowerCamelCase, mode=__lowerCamelCase )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = style_docstrings_in_code(__lowerCamelCase )
return result[len("class Bla:\n" ) :] if has_indent else result
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase=False ):
with open(__lowerCamelCase, "r", encoding="utf-8", newline="\n" ) as f:
_SCREAMING_SNAKE_CASE : int = f.readlines()
_SCREAMING_SNAKE_CASE : Dict = []
_SCREAMING_SNAKE_CASE : Tuple = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = search.groups()
_SCREAMING_SNAKE_CASE : Any = find_code_in_diffusers(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = get_indent(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = line_index + 1 if indent == theoretical_indent else line_index + 2
_SCREAMING_SNAKE_CASE : int = theoretical_indent
_SCREAMING_SNAKE_CASE : str = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
_SCREAMING_SNAKE_CASE : Any = True
while line_index < len(__lowerCamelCase ) and should_continue:
line_index += 1
if line_index >= len(__lowerCamelCase ):
break
_SCREAMING_SNAKE_CASE : Union[str, Any] = lines[line_index]
_SCREAMING_SNAKE_CASE : str = _should_continue(__lowerCamelCase, __lowerCamelCase ) and re.search(f"""^{indent}# End copy""", __lowerCamelCase ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
_SCREAMING_SNAKE_CASE : List[Any] = lines[start_index:line_index]
_SCREAMING_SNAKE_CASE : Optional[Any] = "".join(__lowerCamelCase )
# Remove any nested `Copied from` comments to avoid circular copies
_SCREAMING_SNAKE_CASE : Dict = [line for line in theoretical_code.split("\n" ) if _re_copy_warning.search(__lowerCamelCase ) is None]
_SCREAMING_SNAKE_CASE : str = "\n".join(__lowerCamelCase )
# Before comparing, use the `replace_pattern` on the original code.
if len(__lowerCamelCase ) > 0:
_SCREAMING_SNAKE_CASE : str = replace_pattern.replace("with", "" ).split("," )
_SCREAMING_SNAKE_CASE : Union[str, Any] = [_re_replace_pattern.search(__lowerCamelCase ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = pattern.groups()
_SCREAMING_SNAKE_CASE : Tuple = re.sub(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
if option.strip() == "all-casing":
_SCREAMING_SNAKE_CASE : List[Any] = re.sub(obja.lower(), obja.lower(), __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = re.sub(obja.upper(), obja.upper(), __lowerCamelCase )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
_SCREAMING_SNAKE_CASE : int = blackify(lines[start_index - 1] + theoretical_code )
_SCREAMING_SNAKE_CASE : List[str] = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
_SCREAMING_SNAKE_CASE : Optional[int] = lines[:start_index] + [theoretical_code] + lines[line_index:]
_SCREAMING_SNAKE_CASE : int = start_index + 1
if overwrite and len(__lowerCamelCase ) > 0:
# Warn the user a file has been modified.
print(f"""Detected changes, rewriting {filename}.""" )
with open(__lowerCamelCase, "w", encoding="utf-8", newline="\n" ) as f:
f.writelines(__lowerCamelCase )
return diffs
def lowerCamelCase__ (__lowerCamelCase = False ):
_SCREAMING_SNAKE_CASE : int = glob.glob(os.path.join(__lowerCamelCase, "**/*.py" ), recursive=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = []
for filename in all_files:
_SCREAMING_SNAKE_CASE : int = is_copy_consistent(__lowerCamelCase, __lowerCamelCase )
diffs += [f"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs]
if not overwrite and len(__lowerCamelCase ) > 0:
_SCREAMING_SNAKE_CASE : Dict = "\n".join(__lowerCamelCase )
raise Exception(
"Found the following copy inconsistencies:\n"
+ diff
+ "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them." )
if __name__ == "__main__":
UpperCamelCase__ =argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
UpperCamelCase__ =parser.parse_args()
check_copies(args.fix_and_overwrite) | 325 | 0 |
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class A_ ( SCREAMING_SNAKE_CASE ):
def __init__( self : List[Any] ,SCREAMING_SNAKE_CASE__ : UNetaDModel ,SCREAMING_SNAKE_CASE__ : UNetaDModel ,SCREAMING_SNAKE_CASE__ : DDPMScheduler ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,):
super().__init__()
__lowerCamelCase : str = value_function
__lowerCamelCase : List[Any] = unet
__lowerCamelCase : str = scheduler
__lowerCamelCase : List[Any] = env
__lowerCamelCase : List[Any] = env.get_dataset()
__lowerCamelCase : Union[str, Any] = {}
for key in self.data.keys():
try:
__lowerCamelCase : Union[str, Any] = self.data[key].mean()
except: # noqa: E722
pass
__lowerCamelCase : Tuple = {}
for key in self.data.keys():
try:
__lowerCamelCase : int = self.data[key].std()
except: # noqa: E722
pass
__lowerCamelCase : Tuple = env.observation_space.shape[0]
__lowerCamelCase : Dict = env.action_space.shape[0]
def lowerCAmelCase ( self : str ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any]):
return (x_in - self.means[key]) / self.stds[key]
def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : Dict):
return x_in * self.stds[key] + self.means[key]
def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : List[Any]):
if type(SCREAMING_SNAKE_CASE__) is dict:
return {k: self.to_torch(SCREAMING_SNAKE_CASE__) for k, v in x_in.items()}
elif torch.is_tensor(SCREAMING_SNAKE_CASE__):
return x_in.to(self.unet.device)
return torch.tensor(SCREAMING_SNAKE_CASE__ ,device=self.unet.device)
def lowerCAmelCase ( self : str ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : List[str]):
for key, val in cond.items():
__lowerCamelCase : str = val.clone()
return x_in
def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : Tuple):
__lowerCamelCase : Dict = x.shape[0]
__lowerCamelCase : Union[str, Any] = None
for i in tqdm.tqdm(self.scheduler.timesteps):
# create batch of timesteps to pass into model
__lowerCamelCase : List[str] = torch.full((batch_size,) ,SCREAMING_SNAKE_CASE__ ,device=self.unet.device ,dtype=torch.long)
for _ in range(SCREAMING_SNAKE_CASE__):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
__lowerCamelCase : Tuple = self.value_function(x.permute(0 ,2 ,1) ,SCREAMING_SNAKE_CASE__).sample
__lowerCamelCase : str = torch.autograd.grad([y.sum()] ,[x])[0]
__lowerCamelCase : Optional[Any] = self.scheduler._get_variance(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[int] = torch.exp(0.5 * posterior_variance)
__lowerCamelCase : List[str] = model_std * grad
__lowerCamelCase : str = 0
__lowerCamelCase : Optional[Any] = x.detach()
__lowerCamelCase : Optional[Any] = x + scale * grad
__lowerCamelCase : Optional[int] = self.reset_xa(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,self.action_dim)
__lowerCamelCase : Optional[int] = self.unet(x.permute(0 ,2 ,1) ,SCREAMING_SNAKE_CASE__).sample.permute(0 ,2 ,1)
# TODO: verify deprecation of this kwarg
__lowerCamelCase : List[Any] = self.scheduler.step(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,predict_epsilon=SCREAMING_SNAKE_CASE__)['prev_sample']
# apply conditions to the trajectory (set the initial state)
__lowerCamelCase : Optional[Any] = self.reset_xa(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,self.action_dim)
__lowerCamelCase : List[Any] = self.to_torch(SCREAMING_SNAKE_CASE__)
return x, y
def __call__( self : Any ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : List[Any]=6_4 ,SCREAMING_SNAKE_CASE__ : int=3_2 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=2 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.1):
# normalize the observations and create batch dimension
__lowerCamelCase : Tuple = self.normalize(SCREAMING_SNAKE_CASE__ ,'observations')
__lowerCamelCase : List[str] = obs[None].repeat(SCREAMING_SNAKE_CASE__ ,axis=0)
__lowerCamelCase : Optional[int] = {0: self.to_torch(SCREAMING_SNAKE_CASE__)}
__lowerCamelCase : int = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
__lowerCamelCase : Any = randn_tensor(SCREAMING_SNAKE_CASE__ ,device=self.unet.device)
__lowerCamelCase : Tuple = self.reset_xa(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,self.action_dim)
__lowerCamelCase : List[str] = self.to_torch(SCREAMING_SNAKE_CASE__)
# run the diffusion process
__lowerCamelCase , __lowerCamelCase : Optional[Any] = self.run_diffusion(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
# sort output trajectories by value
__lowerCamelCase : Tuple = y.argsort(0 ,descending=SCREAMING_SNAKE_CASE__).squeeze()
__lowerCamelCase : Tuple = x[sorted_idx]
__lowerCamelCase : List[str] = sorted_values[:, :, : self.action_dim]
__lowerCamelCase : List[Any] = actions.detach().cpu().numpy()
__lowerCamelCase : str = self.de_normalize(SCREAMING_SNAKE_CASE__ ,key='actions')
# select the action with the highest value
if y is not None:
__lowerCamelCase : Any = 0
else:
# if we didn't run value guiding, select a random action
__lowerCamelCase : Dict = np.random.randint(0 ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Dict = denorm_actions[selected_index, 0]
return denorm_actions
| 73 |
def _a ( SCREAMING_SNAKE_CASE_ : int = 1_00_00_00 ):
__lowerCAmelCase = [i - 1 for i in range(limit + 1 )]
for i in range(2 , limit + 1 ):
if phi[i] == i - 1:
for j in range(2 * i , limit + 1 , SCREAMING_SNAKE_CASE_ ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
print(solution())
| 92 | 0 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE = BlipImageProcessor()
SCREAMING_SNAKE_CASE = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" )
SCREAMING_SNAKE_CASE = BlipProcessor(__lowerCamelCase ,__lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self : Dict ,**lowerCamelCase__ : List[Any] ) -> Any:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname ,**__lowerCamelCase ).tokenizer
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ,**lowerCamelCase__ : List[Any] ) -> Optional[int]:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname ,**__lowerCamelCase ).image_processor
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )]
SCREAMING_SNAKE_CASE = [Image.fromarray(np.moveaxis(__lowerCamelCase ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" )
SCREAMING_SNAKE_CASE = self.get_image_processor(do_normalize=__lowerCamelCase ,padding_value=1.0 )
SCREAMING_SNAKE_CASE = BlipProcessor.from_pretrained(
self.tmpdirname ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,do_normalize=__lowerCamelCase ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,__lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=__lowerCamelCase ,image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE = image_processor(__lowerCamelCase ,return_tensors="""np""" )
SCREAMING_SNAKE_CASE = processor(images=__lowerCamelCase ,return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=__lowerCamelCase ,image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE = """lower newer"""
SCREAMING_SNAKE_CASE = processor(text=__lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer(__lowerCamelCase ,return_token_type_ids=__lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=__lowerCamelCase ,image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE = """lower newer"""
SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE = processor(text=__lowerCamelCase ,images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) ,["""pixel_values""", """input_ids""", """attention_mask"""] )
# test if it raises when no input is passed
with pytest.raises(__lowerCamelCase ):
processor()
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=__lowerCamelCase ,image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE = processor.batch_decode(__lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer.batch_decode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase ,__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=__lowerCamelCase ,image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE = """lower newer"""
SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE = processor(text=__lowerCamelCase ,images=__lowerCamelCase )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) ,["""pixel_values""", """input_ids""", """attention_mask"""] )
| 367 |
def __lowercase ( _SCREAMING_SNAKE_CASE = 50 ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(F'''{solution() = }''')
| 193 | 0 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = math.inf , SCREAMING_SNAKE_CASE_ = -math.inf , SCREAMING_SNAKE_CASE_ = math.inf , SCREAMING_SNAKE_CASE_ = -math.inf , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = 100 , SCREAMING_SNAKE_CASE_ = 0.01 , SCREAMING_SNAKE_CASE_ = 1 , ):
'''simple docstring'''
lowerCamelCase : Union[str, Any] = False
lowerCamelCase : Any = search_prob
lowerCamelCase : Optional[int] = start_temperate
lowerCamelCase : Any = []
lowerCamelCase : List[Any] = 0
lowerCamelCase : Dict = None
while not search_end:
lowerCamelCase : Any = current_state.score()
if best_state is None or current_score > best_state.score():
lowerCamelCase : List[str] = current_state
scores.append(_lowercase )
iterations += 1
lowerCamelCase : Optional[int] = None
lowerCamelCase : Any = current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
lowerCamelCase : str = random.randint(0 , len(_lowercase ) - 1 ) # picking a random neighbor
lowerCamelCase : int = neighbors.pop(_lowercase )
lowerCamelCase : Any = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
lowerCamelCase : List[Any] = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
lowerCamelCase : List[Any] = picked_neighbor
else:
lowerCamelCase : List[Any] = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
lowerCamelCase : str = picked_neighbor
lowerCamelCase : Tuple = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
lowerCamelCase : Union[str, Any] = True
else:
lowerCamelCase : Optional[int] = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(_lowercase ) , _lowercase )
plt.xlabel("Iterations" )
plt.ylabel("Function values" )
plt.show()
return best_state
if __name__ == "__main__":
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
_snake_case = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
_snake_case = simulated_annealing(
prob, find_max=False, max_x=1_00, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'''The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
f'''and 50 > y > - 5 found via hill climbing: {local_min.score()}'''
)
# starting the problem with initial coordinates (12, 47)
_snake_case = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
_snake_case = simulated_annealing(
prob, find_max=True, max_x=1_00, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'''The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
f'''and 50 > y > - 5 found via hill climbing: {local_min.score()}'''
)
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return (3 * x**2) - (6 * y)
_snake_case = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
_snake_case = simulated_annealing(prob, find_max=False, visualization=True)
print(
'''The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
f'''{local_min.score()}'''
)
_snake_case = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
_snake_case = simulated_annealing(prob, find_max=True, visualization=True)
print(
'''The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
f'''{local_min.score()}'''
)
| 283 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
a : Optional[int] = logging.get_logger(__name__)
# General docstring
a : Union[str, Any] = '''MobileNetV1Config'''
# Base docstring
a : str = '''google/mobilenet_v1_1.0_224'''
a : str = [1, 1024, 7, 7]
# Image classification docstring
a : Optional[Any] = '''google/mobilenet_v1_1.0_224'''
a : Optional[int] = '''tabby, tabby cat'''
a : List[str] = [
'''google/mobilenet_v1_1.0_224''',
'''google/mobilenet_v1_0.75_192''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _SCREAMING_SNAKE_CASE ( _lowercase : List[Any] , _lowercase : str , _lowercase : int=None ) ->int:
'''simple docstring'''
a : List[Any] = {}
if isinstance(_lowercase , _lowercase ):
a : Union[str, Any] = model.mobilenet_va
else:
a : List[str] = model
a : Dict = "MobilenetV1/Conv2d_0/"
a : Tuple = backbone.conv_stem.convolution.weight
a : Dict = backbone.conv_stem.normalization.bias
a : Optional[Any] = backbone.conv_stem.normalization.weight
a : Optional[Any] = backbone.conv_stem.normalization.running_mean
a : Tuple = backbone.conv_stem.normalization.running_var
for i in range(13 ):
a : List[str] = i + 1
a : Dict = i * 2
a : int = backbone.layer[pt_index]
a : List[str] = F"""MobilenetV1/Conv2d_{tf_index}_depthwise/"""
a : int = pointer.convolution.weight
a : Union[str, Any] = pointer.normalization.bias
a : Union[str, Any] = pointer.normalization.weight
a : Optional[Any] = pointer.normalization.running_mean
a : Dict = pointer.normalization.running_var
a : List[Any] = backbone.layer[pt_index + 1]
a : Union[str, Any] = F"""MobilenetV1/Conv2d_{tf_index}_pointwise/"""
a : Dict = pointer.convolution.weight
a : Optional[Any] = pointer.normalization.bias
a : Dict = pointer.normalization.weight
a : Optional[Any] = pointer.normalization.running_mean
a : Optional[Any] = pointer.normalization.running_var
if isinstance(_lowercase , _lowercase ):
a : Dict = "MobilenetV1/Logits/Conv2d_1c_1x1/"
a : Tuple = model.classifier.weight
a : Optional[int] = model.classifier.bias
return tf_to_pt_map
def _SCREAMING_SNAKE_CASE ( _lowercase : Any , _lowercase : List[Any] , _lowercase : Tuple ) ->int:
'''simple docstring'''
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions." )
raise
# Load weights from TF model
a : List[Any] = tf.train.list_variables(_lowercase )
a : Optional[int] = {}
for name, shape in init_vars:
logger.info(F"""Loading TF weight {name} with shape {shape}""" )
a : Union[str, Any] = tf.train.load_variable(_lowercase , _lowercase )
a : Optional[Any] = array
# Build TF to PyTorch weights loading map
a : Tuple = _build_tf_to_pytorch_map(_lowercase , _lowercase , _lowercase )
for name, pointer in tf_to_pt_map.items():
logger.info(F"""Importing {name}""" )
if name not in tf_weights:
logger.info(F"""{name} not in tf pre-trained weights, skipping""" )
continue
a : List[str] = tf_weights[name]
if "depthwise_weights" in name:
logger.info("Transposing depthwise" )
a : List[Any] = np.transpose(_lowercase , (2, 3, 0, 1) )
elif "weights" in name:
logger.info("Transposing" )
if len(pointer.shape ) == 2: # copying into linear layer
a : Union[str, Any] = array.squeeze().transpose()
else:
a : Any = np.transpose(_lowercase , (3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(F"""Pointer shape {pointer.shape} and array shape {array.shape} mismatched""" )
logger.info(F"""Initialize PyTorch weight {name} {array.shape}""" )
a : str = torch.from_numpy(_lowercase )
tf_weights.pop(_lowercase , _lowercase )
tf_weights.pop(name + "/RMSProp" , _lowercase )
tf_weights.pop(name + "/RMSProp_1" , _lowercase )
tf_weights.pop(name + "/ExponentialMovingAverage" , _lowercase )
logger.info(F"""Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}""" )
return model
def _SCREAMING_SNAKE_CASE ( _lowercase : torch.Tensor , _lowercase : nn.Convad ) ->torch.Tensor:
'''simple docstring'''
a, a : Any = features.shape[-2:]
a, a : Dict = conv_layer.stride
a, a : int = conv_layer.kernel_size
if in_height % stride_height == 0:
a : Tuple = max(kernel_height - stride_height , 0 )
else:
a : Optional[Any] = max(kernel_height - (in_height % stride_height) , 0 )
if in_width % stride_width == 0:
a : Optional[Any] = max(kernel_width - stride_width , 0 )
else:
a : str = max(kernel_width - (in_width % stride_width) , 0 )
a : Any = pad_along_width // 2
a : List[str] = pad_along_width - pad_left
a : List[str] = pad_along_height // 2
a : List[Any] = pad_along_height - pad_top
a : int = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(_lowercase , _lowercase , "constant" , 0.0 )
class __UpperCamelCase ( nn.Module ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 1 , lowerCAmelCase__ = 1 , lowerCAmelCase__ = False , lowerCAmelCase__ = True , lowerCAmelCase__ = True , ) -> None:
super().__init__()
a : str = config
if in_channels % groups != 0:
raise ValueError(f"""Input channels ({in_channels}) are not divisible by {groups} groups.""" )
if out_channels % groups != 0:
raise ValueError(f"""Output channels ({out_channels}) are not divisible by {groups} groups.""" )
a : Optional[int] = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
a : Tuple = nn.Convad(
in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , kernel_size=lowerCAmelCase__ , stride=lowerCAmelCase__ , padding=lowerCAmelCase__ , groups=lowerCAmelCase__ , bias=lowerCAmelCase__ , padding_mode="zeros" , )
if use_normalization:
a : Optional[int] = nn.BatchNormad(
num_features=lowerCAmelCase__ , eps=config.layer_norm_eps , momentum=0.9_997 , affine=lowerCAmelCase__ , track_running_stats=lowerCAmelCase__ , )
else:
a : int = None
if use_activation:
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
a : Optional[int] = ACTaFN[use_activation]
elif isinstance(config.hidden_act , lowerCAmelCase__ ):
a : Dict = ACTaFN[config.hidden_act]
else:
a : Union[str, Any] = config.hidden_act
else:
a : int = None
def __a ( self , lowerCAmelCase__ ) -> torch.Tensor:
if self.config.tf_padding:
a : Union[str, Any] = apply_tf_padding(lowerCAmelCase__ , self.convolution )
a : List[str] = self.convolution(lowerCAmelCase__ )
if self.normalization is not None:
a : int = self.normalization(lowerCAmelCase__ )
if self.activation is not None:
a : Dict = self.activation(lowerCAmelCase__ )
return features
class __UpperCamelCase ( a__ ):
lowerCamelCase : List[str] =MobileNetVaConfig
lowerCamelCase : str =load_tf_weights_in_mobilenet_va
lowerCamelCase : List[str] ="""mobilenet_v1"""
lowerCamelCase : Tuple ="""pixel_values"""
lowerCamelCase : Optional[Any] =False
def __a ( self , lowerCAmelCase__ ) -> None:
if isinstance(lowerCAmelCase__ , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(lowerCAmelCase__ , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
a : Optional[Any] = R'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
a : List[str] = R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"""The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.""" , a__ , )
class __UpperCamelCase ( a__ ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ = True ) -> List[str]:
super().__init__(lowerCAmelCase__ )
a : Tuple = config
a : Dict = 32
a : Optional[int] = max(int(depth * config.depth_multiplier ) , config.min_depth )
a : Dict = MobileNetVaConvLayer(
lowerCAmelCase__ , in_channels=config.num_channels , out_channels=lowerCAmelCase__ , kernel_size=3 , stride=2 , )
a : Dict = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
a : int = nn.ModuleList()
for i in range(13 ):
a : Optional[Any] = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
a : List[str] = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
lowerCAmelCase__ , in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , kernel_size=3 , stride=strides[i] , groups=lowerCAmelCase__ , ) )
self.layer.append(
MobileNetVaConvLayer(
lowerCAmelCase__ , in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , kernel_size=1 , ) )
a : Tuple = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def __a ( self , lowerCAmelCase__ ) -> Optional[Any]:
raise NotImplementedError
@add_start_docstrings_to_model_forward(lowerCAmelCase__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCAmelCase__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __a ( self , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
a : Dict = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
a : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("You have to specify pixel_values" )
a : List[str] = self.conv_stem(lowerCAmelCase__ )
a : Dict = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
a : List[Any] = layer_module(lowerCAmelCase__ )
if output_hidden_states:
a : Optional[Any] = all_hidden_states + (hidden_states,)
a : Any = hidden_states
if self.pooler is not None:
a : Union[str, Any] = torch.flatten(self.pooler(lowerCAmelCase__ ) , start_dim=1 )
else:
a : List[Any] = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCAmelCase__ , pooler_output=lowerCAmelCase__ , hidden_states=lowerCAmelCase__ , )
@add_start_docstrings(
"""
MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
""" , a__ , )
class __UpperCamelCase ( a__ ):
def __init__( self , lowerCAmelCase__ ) -> None:
super().__init__(lowerCAmelCase__ )
a : int = config.num_labels
a : List[Any] = MobileNetVaModel(lowerCAmelCase__ )
a : List[str] = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
a : Union[str, Any] = nn.Dropout(config.classifier_dropout_prob , inplace=lowerCAmelCase__ )
a : str = nn.Linear(lowerCAmelCase__ , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCAmelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __a ( self , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
a : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
a : Any = self.mobilenet_va(lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , return_dict=lowerCAmelCase__ )
a : Optional[int] = outputs.pooler_output if return_dict else outputs[1]
a : Tuple = self.classifier(self.dropout(lowerCAmelCase__ ) )
a : Tuple = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
a : List[Any] = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
a : Any = "single_label_classification"
else:
a : int = "multi_label_classification"
if self.config.problem_type == "regression":
a : Tuple = MSELoss()
if self.num_labels == 1:
a : Dict = loss_fct(logits.squeeze() , labels.squeeze() )
else:
a : str = loss_fct(lowerCAmelCase__ , lowerCAmelCase__ )
elif self.config.problem_type == "single_label_classification":
a : List[Any] = CrossEntropyLoss()
a : Any = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
a : int = BCEWithLogitsLoss()
a : Optional[int] = loss_fct(lowerCAmelCase__ , lowerCAmelCase__ )
if not return_dict:
a : Optional[Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=lowerCAmelCase__ , logits=lowerCAmelCase__ , hidden_states=outputs.hidden_states , )
| 105 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class _A :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=12 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=32 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=0.02 , __UpperCAmelCase=0 , __UpperCAmelCase=None , ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : List[str] = parent
__UpperCAmelCase : List[str] = batch_size
__UpperCAmelCase : Union[str, Any] = seq_length
__UpperCAmelCase : Any = is_training
__UpperCAmelCase : Any = use_input_mask
__UpperCAmelCase : Union[str, Any] = use_labels
__UpperCAmelCase : Union[str, Any] = vocab_size
__UpperCAmelCase : Optional[int] = hidden_size
__UpperCAmelCase : Union[str, Any] = projection_dim
__UpperCAmelCase : List[str] = num_hidden_layers
__UpperCAmelCase : Optional[int] = num_attention_heads
__UpperCAmelCase : Union[str, Any] = intermediate_size
__UpperCAmelCase : List[Any] = dropout
__UpperCAmelCase : Dict = attention_dropout
__UpperCAmelCase : Optional[int] = max_position_embeddings
__UpperCAmelCase : Any = initializer_range
__UpperCAmelCase : int = scope
__UpperCAmelCase : Optional[Any] = bos_token_id
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : Dict = None
if self.use_input_mask:
__UpperCAmelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
__UpperCAmelCase : int = input_mask.numpy()
__UpperCAmelCase , __UpperCAmelCase : int = input_mask.shape
__UpperCAmelCase : Any = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(__UpperCAmelCase ):
__UpperCAmelCase : Union[str, Any] = 1
__UpperCAmelCase : str = 0
__UpperCAmelCase : Tuple = self.get_config()
return config, input_ids, tf.convert_to_tensor(__UpperCAmelCase )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase : Any = TFBlipTextModel(config=__UpperCAmelCase )
__UpperCAmelCase : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , training=__UpperCAmelCase )
__UpperCAmelCase : int = model(__UpperCAmelCase , training=__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __A ( self ) -> int:
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = config_and_inputs
__UpperCAmelCase : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class _A ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Optional[Any] = (TFBlipTextModel,) if is_tf_available() else ()
_SCREAMING_SNAKE_CASE : List[Any] = False
_SCREAMING_SNAKE_CASE : Any = False
_SCREAMING_SNAKE_CASE : Union[str, Any] = False
def __A ( self ) -> int:
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = BlipTextModelTester(self )
__UpperCAmelCase : Any = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def __A ( self ) -> List[str]:
'''simple docstring'''
pass
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="""Blip does not use inputs_embeds""" )
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" )
def __A ( self ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
pass
@slow
def __A ( self ) -> int:
'''simple docstring'''
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : List[str] = TFBlipTextModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def __A ( self , __UpperCAmelCase=True ) -> List[Any]:
'''simple docstring'''
super().test_pt_tf_model_equivalence(allow_missing_keys=__UpperCAmelCase )
| 16 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class _A :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ) -> Optional[Any]:
'''simple docstring'''
__UpperCAmelCase : List[str] = parent
__UpperCAmelCase : Union[str, Any] = batch_size
__UpperCAmelCase : Tuple = seq_length
__UpperCAmelCase : str = is_training
__UpperCAmelCase : Union[str, Any] = use_input_mask
__UpperCAmelCase : List[Any] = use_token_type_ids
__UpperCAmelCase : Optional[Any] = use_labels
__UpperCAmelCase : str = vocab_size
__UpperCAmelCase : Union[str, Any] = hidden_size
__UpperCAmelCase : Optional[int] = num_hidden_layers
__UpperCAmelCase : str = num_attention_heads
__UpperCAmelCase : Optional[Any] = intermediate_size
__UpperCAmelCase : Optional[int] = hidden_act
__UpperCAmelCase : List[str] = hidden_dropout_prob
__UpperCAmelCase : List[str] = attention_probs_dropout_prob
__UpperCAmelCase : Tuple = max_position_embeddings
__UpperCAmelCase : Dict = type_vocab_size
__UpperCAmelCase : List[Any] = type_sequence_label_size
__UpperCAmelCase : List[Any] = initializer_range
__UpperCAmelCase : List[str] = num_labels
__UpperCAmelCase : str = num_choices
__UpperCAmelCase : List[Any] = scope
def __A ( self ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : Dict = None
if self.use_input_mask:
__UpperCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase : int = None
if self.use_token_type_ids:
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCAmelCase : Optional[int] = None
__UpperCAmelCase : List[Any] = None
__UpperCAmelCase : Union[str, Any] = None
if self.use_labels:
__UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCAmelCase : Any = ids_tensor([self.batch_size] , self.num_choices )
__UpperCAmelCase : Dict = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , )
def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = LlamaModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCAmelCase : Dict = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )
__UpperCAmelCase : Union[str, Any] = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : List[str] = True
__UpperCAmelCase : List[str] = LlamaModel(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCAmelCase : List[Any] = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , )
__UpperCAmelCase : Tuple = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , )
__UpperCAmelCase : Union[str, Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Any:
'''simple docstring'''
__UpperCAmelCase : List[Any] = LlamaForCausalLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCAmelCase : int = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = True
__UpperCAmelCase : Any = True
__UpperCAmelCase : Tuple = LlamaForCausalLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
# first forward pass
__UpperCAmelCase : Optional[int] = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase , )
__UpperCAmelCase : Union[str, Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__UpperCAmelCase : List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
__UpperCAmelCase : List[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
__UpperCAmelCase : str = torch.cat([input_ids, next_tokens] , dim=-1 )
__UpperCAmelCase : Union[str, Any] = torch.cat([input_mask, next_mask] , dim=-1 )
__UpperCAmelCase : int = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["""hidden_states"""][0]
__UpperCAmelCase : Dict = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["""hidden_states"""][0]
# select random slice
__UpperCAmelCase : List[str] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__UpperCAmelCase : Dict = output_from_no_past[:, -3:, random_slice_idx].detach()
__UpperCAmelCase : Tuple = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3 ) )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : Any = self.prepare_config_and_inputs()
(
(
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) ,
) : Any = config_and_inputs
__UpperCAmelCase : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class _A ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Optional[int] = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
_SCREAMING_SNAKE_CASE : Any = (LlamaForCausalLM,) if is_torch_available() else ()
_SCREAMING_SNAKE_CASE : List[str] = (
{
"feature-extraction": LlamaModel,
"text-classification": LlamaForSequenceClassification,
"text-generation": LlamaForCausalLM,
"zero-shot": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
_SCREAMING_SNAKE_CASE : Optional[int] = False
_SCREAMING_SNAKE_CASE : List[str] = False
def __A ( self ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : Tuple = LlamaModelTester(self )
__UpperCAmelCase : Tuple = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 )
def __A ( self ) -> List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
def __A ( self ) -> Any:
'''simple docstring'''
__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def __A ( self ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__UpperCAmelCase : str = type
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def __A ( self ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Any = 3
__UpperCAmelCase : Optional[Any] = input_dict["""input_ids"""]
__UpperCAmelCase : int = input_ids.ne(1 ).to(__UpperCAmelCase )
__UpperCAmelCase : Union[str, Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__UpperCAmelCase : Dict = LlamaForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCAmelCase : List[Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __A ( self ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Optional[int] = 3
__UpperCAmelCase : Optional[Any] = """single_label_classification"""
__UpperCAmelCase : int = input_dict["""input_ids"""]
__UpperCAmelCase : List[Any] = input_ids.ne(1 ).to(__UpperCAmelCase )
__UpperCAmelCase : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__UpperCAmelCase : Tuple = LlamaForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCAmelCase : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __A ( self ) -> Any:
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Optional[Any] = 3
__UpperCAmelCase : str = """multi_label_classification"""
__UpperCAmelCase : Union[str, Any] = input_dict["""input_ids"""]
__UpperCAmelCase : int = input_ids.ne(1 ).to(__UpperCAmelCase )
__UpperCAmelCase : str = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__UpperCAmelCase : Dict = LlamaForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCAmelCase : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""LLaMA buffers include complex numbers, which breaks this test""" )
def __A ( self ) -> Dict:
'''simple docstring'''
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def __A ( self , __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : List[Any] = ids_tensor([1, 10] , config.vocab_size )
__UpperCAmelCase : str = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__UpperCAmelCase : Optional[Any] = LlamaModel(__UpperCAmelCase )
original_model.to(__UpperCAmelCase )
original_model.eval()
__UpperCAmelCase : int = original_model(__UpperCAmelCase ).last_hidden_state
__UpperCAmelCase : List[str] = original_model(__UpperCAmelCase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__UpperCAmelCase : Dict = {"""type""": scaling_type, """factor""": 10.0}
__UpperCAmelCase : Optional[Any] = LlamaModel(__UpperCAmelCase )
scaled_model.to(__UpperCAmelCase )
scaled_model.eval()
__UpperCAmelCase : Optional[Any] = scaled_model(__UpperCAmelCase ).last_hidden_state
__UpperCAmelCase : List[str] = scaled_model(__UpperCAmelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 ) )
@require_torch
class _A ( unittest.TestCase ):
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def __A ( self ) -> Any:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
__UpperCAmelCase : Optional[int] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-7b-hf""" , device_map="""auto""" )
__UpperCAmelCase : int = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
__UpperCAmelCase : str = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__UpperCAmelCase : List[Any] = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , __UpperCAmelCase , atol=1E-5 , rtol=1E-5 )
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
__UpperCAmelCase : Any = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
__UpperCAmelCase : int = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-hf""" , device_map="""auto""" )
__UpperCAmelCase : str = model(torch.tensor(__UpperCAmelCase ) )
# Expected mean on dim = -1
__UpperCAmelCase : str = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__UpperCAmelCase : List[str] = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , __UpperCAmelCase , atol=1E-5 , rtol=1E-5 )
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def __A ( self ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
__UpperCAmelCase : Union[str, Any] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" , device_map="""auto""" )
__UpperCAmelCase : Union[str, Any] = model(torch.tensor(__UpperCAmelCase ) )
# Expected mean on dim = -1
__UpperCAmelCase : Dict = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__UpperCAmelCase : Any = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 )
@unittest.skip(
"""Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test""" )
@slow
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase : Any = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
__UpperCAmelCase : str = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-70b-hf""" , device_map="""auto""" )
__UpperCAmelCase : List[Any] = model(torch.tensor(__UpperCAmelCase ) )
__UpperCAmelCase : Dict = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 )
# fmt: off
__UpperCAmelCase : List[str] = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , __UpperCAmelCase , atol=1E-5 , rtol=1E-5 )
@unittest.skip("""Model is curently gated""" )
@slow
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"""
__UpperCAmelCase : Dict = """Simply put, the theory of relativity states that """
__UpperCAmelCase : int = LlamaTokenizer.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" )
__UpperCAmelCase : int = tokenizer.encode(__UpperCAmelCase , return_tensors="""pt""" )
__UpperCAmelCase : int = LlamaForCausalLM.from_pretrained(
"""meta-llama/Llama-2-13b-chat-hf""" , device_map="""sequential""" , use_safetensors=__UpperCAmelCase )
# greedy generation outputs
__UpperCAmelCase : Tuple = model.generate(__UpperCAmelCase , max_new_tokens=64 , top_p=__UpperCAmelCase , temperature=1 , do_sample=__UpperCAmelCase )
__UpperCAmelCase : Optional[int] = tokenizer.decode(generated_ids[0] , skip_special_tokens=__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
| 16 | 1 |
'''simple docstring'''
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
__lowerCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class A__ ( __SCREAMING_SNAKE_CASE ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) -> List[Any]:
'''simple docstring'''
super().__init__()
if hasattr(scheduler.config , """steps_offset""" ) and scheduler.config.steps_offset != 1:
A_ = (
f'''The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'''
f''' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '''
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("""steps_offset!=1""" , """1.0.0""" , UpperCAmelCase_ , standard_warn=UpperCAmelCase_ )
A_ = dict(scheduler.config )
A_ = 1
A_ = FrozenDict(UpperCAmelCase_ )
if hasattr(scheduler.config , """skip_prk_steps""" ) and scheduler.config.skip_prk_steps is False:
A_ = (
f'''The configuration file of this scheduler: {scheduler} has not set the configuration'''
" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
" Hub, it would be very nice if you could open a Pull request for the"
" `scheduler/scheduler_config.json` file"
)
deprecate("""skip_prk_steps not set""" , """1.0.0""" , UpperCAmelCase_ , standard_warn=UpperCAmelCase_ )
A_ = dict(scheduler.config )
A_ = True
A_ = FrozenDict(UpperCAmelCase_ )
if safety_checker is None:
logger.warning(
f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
""" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
""" results in services or applications open to the public. Both the diffusers team and Hugging Face"""
""" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
""" it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
""" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
self.register_modules(
segmentation_model=UpperCAmelCase_ , segmentation_processor=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , )
def snake_case_ ( self , UpperCamelCase__ = "auto" ) -> List[Any]:
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
A_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCAmelCase_ )
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
self.enable_attention_slicing(UpperCAmelCase_ )
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
A_ = torch.device("""cuda""" )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(UpperCAmelCase_ , UpperCAmelCase_ )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def snake_case_ ( self ) -> str:
'''simple docstring'''
if self.device != torch.device("""meta""" ) or not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(UpperCAmelCase_ , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = 512 , UpperCamelCase__ = 512 , UpperCamelCase__ = 50 , UpperCamelCase__ = 7.5 , UpperCamelCase__ = None , UpperCamelCase__ = 1 , UpperCamelCase__ = 0.0 , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = "pil" , UpperCamelCase__ = True , UpperCamelCase__ = None , UpperCamelCase__ = 1 , **UpperCamelCase__ , ) -> Optional[Any]:
'''simple docstring'''
A_ = self.segmentation_processor(
text=[text] , images=[image] , padding="""max_length""" , return_tensors="""pt""" ).to(self.device )
A_ = self.segmentation_model(**UpperCAmelCase_ )
A_ = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
A_ = self.numpy_to_pil(UpperCAmelCase_ )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
A_ = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=UpperCAmelCase_ , image=UpperCAmelCase_ , mask_image=UpperCAmelCase_ , height=UpperCAmelCase_ , width=UpperCAmelCase_ , num_inference_steps=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , negative_prompt=UpperCAmelCase_ , num_images_per_prompt=UpperCAmelCase_ , eta=UpperCAmelCase_ , generator=UpperCAmelCase_ , latents=UpperCAmelCase_ , output_type=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , callback=UpperCAmelCase_ , callback_steps=UpperCAmelCase_ , )
| 162 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=__SCREAMING_SNAKE_CASE )
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = field(default="image-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
lowercase_ = Features({"image": Image()} )
lowercase_ = Features({"labels": ClassLabel} )
lowercase_ = "image"
lowercase_ = "labels"
def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : Union[str, Any]) ->Tuple:
'''simple docstring'''
if self.label_column not in features:
raise ValueError(F"""Column {self.label_column} is not present in features.""")
if not isinstance(features[self.label_column] , UpperCAmelCase_):
raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""")
lowerCamelCase__: List[Any] =copy.deepcopy(self)
lowerCamelCase__: Optional[int] =self.label_schema.copy()
lowerCamelCase__: int =features[self.label_column]
lowerCamelCase__: int =label_schema
return task_template
@property
def SCREAMING_SNAKE_CASE_ (self : Dict) ->Dict[str, str]:
'''simple docstring'''
return {
self.image_column: "image",
self.label_column: "labels",
}
| 10 | 0 |
from __future__ import annotations
from collections.abc import Callable
def lowercase__ ( __snake_case : Callable[[int | float], int | float] , __snake_case : int | float , __snake_case : int | float , __snake_case : int = 100 , ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = x_start
UpperCAmelCase_ : Optional[int] = fnc(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = 0.0
for _ in range(lowerCamelCase_ ):
# Approximates small segments of curve as linear and solve
# for trapezoidal area
UpperCAmelCase_ : int = (x_end - x_start) / steps + xa
UpperCAmelCase_ : Union[str, Any] = fnc(lowerCamelCase_ )
area += abs(fxa + fxa ) * (xa - xa) / 2
# Increment step
UpperCAmelCase_ : Any = xa
UpperCAmelCase_ : str = fxa
return area
if __name__ == "__main__":
def lowercase__ ( __snake_case : Any ):
'''simple docstring'''
return x**3 + x**2
print('f(x) = x^3 + x^2')
print('The area between the curve, x = -5, x = 5 and the x axis is:')
__UpperCAmelCase = 10
while i <= 100000:
print(F'with {i} steps: {trapezoidal_area(f, -5, 5, i)}')
i *= 10 | 358 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
__UpperCAmelCase = None
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
__UpperCAmelCase = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'
),
},
}
__UpperCAmelCase = {
'moussaKam/mbarthez': 1024,
'moussaKam/barthez': 1024,
'moussaKam/barthez-orangesum-title': 1024,
}
__UpperCAmelCase = '▁'
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : List[Any] = VOCAB_FILES_NAMES
_snake_case : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_snake_case : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case : Dict = ['''input_ids''', '''attention_mask''']
_snake_case : Union[str, Any] = BarthezTokenizer
def __init__( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase="<s>" , _UpperCamelCase="</s>" , _UpperCamelCase="</s>" , _UpperCamelCase="<s>" , _UpperCamelCase="<unk>" , _UpperCamelCase="<pad>" , _UpperCamelCase="<mask>" , **_UpperCamelCase , ) -> str:
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase_ : List[Any] = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else mask_token
super().__init__(
_UpperCamelCase , tokenizer_file=_UpperCamelCase , bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , cls_token=_UpperCamelCase , pad_token=_UpperCamelCase , mask_token=_UpperCamelCase , **_UpperCamelCase , )
UpperCAmelCase_ : Tuple = vocab_file
UpperCAmelCase_ : Tuple = False if not self.vocab_file else True
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase_ : Union[str, Any] = [self.cls_token_id]
UpperCAmelCase_ : Optional[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> List[int]:
UpperCAmelCase_ : Any = [self.sep_token_id]
UpperCAmelCase_ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(_UpperCamelCase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
UpperCAmelCase_ : List[Any] = os.path.join(
_UpperCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ):
copyfile(self.vocab_file , _UpperCamelCase )
return (out_vocab_file,)
| 145 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case =logging.get_logger(__name__)
__snake_case ={
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json',
'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json',
}
class UpperCAmelCase_ ( lowerCamelCase__ ):
lowerCamelCase : Optional[Any] = '''roberta'''
def __init__( self : Tuple , UpperCAmelCase__ : List[str]=5_0_2_6_5 , UpperCAmelCase__ : int=7_6_8 , UpperCAmelCase__ : Union[str, Any]=1_2 , UpperCAmelCase__ : Dict=1_2 , UpperCAmelCase__ : Tuple=3_0_7_2 , UpperCAmelCase__ : Optional[Any]="gelu" , UpperCAmelCase__ : str=0.1 , UpperCAmelCase__ : Any=0.1 , UpperCAmelCase__ : str=5_1_2 , UpperCAmelCase__ : int=2 , UpperCAmelCase__ : Any=0.02 , UpperCAmelCase__ : int=1E-12 , UpperCAmelCase__ : str=1 , UpperCAmelCase__ : Union[str, Any]=0 , UpperCAmelCase__ : Tuple=2 , UpperCAmelCase__ : Optional[int]="absolute" , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : Union[str, Any]=None , **UpperCAmelCase__ : str , ) -> Optional[int]:
super().__init__(pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = hidden_act
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = initializer_range
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = position_embedding_type
lowerCAmelCase = use_cache
lowerCAmelCase = classifier_dropout
class UpperCAmelCase_ ( lowerCamelCase__ ):
@property
def __UpperCAmelCase ( self : int ) -> Tuple:
if self.task == "multiple-choice":
lowerCAmelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCAmelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 4 |
'''simple docstring'''
def lowerCamelCase__ ( _A , _A , _A , _A , _A , ):
a : Dict = [redshift, radiation_density, matter_density, dark_energy]
if any(p < 0 for p in parameters ):
raise ValueError('All input parameters must be positive' )
if any(p > 1 for p in parameters[1:4] ):
raise ValueError('Relative densities cannot be greater than one' )
else:
a : Union[str, Any] = 1 - (matter_density + radiation_density + dark_energy)
a : Union[str, Any] = (
radiation_density * (redshift + 1) ** 4
+ matter_density * (redshift + 1) ** 3
+ curvature * (redshift + 1) ** 2
+ dark_energy
)
a : int = hubble_constant * e_a ** (1 / 2)
return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
lowerCAmelCase: Optional[Any] = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1e-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
) | 297 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class _UpperCAmelCase :
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=13,__SCREAMING_SNAKE_CASE=7,__SCREAMING_SNAKE_CASE=6,__SCREAMING_SNAKE_CASE=17,__SCREAMING_SNAKE_CASE=23,__SCREAMING_SNAKE_CASE=11,__SCREAMING_SNAKE_CASE=True,):
'''simple docstring'''
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = seq_length
__lowerCAmelCase = act_dim
__lowerCAmelCase = state_dim
__lowerCAmelCase = hidden_size
__lowerCAmelCase = max_length
__lowerCAmelCase = is_training
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
__lowerCAmelCase = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
__lowerCAmelCase = floats_tensor((self.batch_size, self.seq_length, 1) )
__lowerCAmelCase = floats_tensor((self.batch_size, self.seq_length, 1) )
__lowerCAmelCase = ids_tensor((self.batch_size, self.seq_length),vocab_size=10_00 )
__lowerCAmelCase = random_attention_mask((self.batch_size, self.seq_length) )
__lowerCAmelCase = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def lowerCamelCase__ ( self ):
'''simple docstring'''
return DecisionTransformerConfig(
batch_size=self.batch_size,seq_length=self.seq_length,act_dim=self.act_dim,state_dim=self.state_dim,hidden_size=self.hidden_size,max_length=self.max_length,)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
__lowerCAmelCase = DecisionTransformerModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.state_preds.shape,states.shape )
self.parent.assertEqual(result.action_preds.shape,actions.shape )
self.parent.assertEqual(result.return_preds.shape,returns_to_go.shape )
self.parent.assertEqual(
result.last_hidden_state.shape,(self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.prepare_config_and_inputs()
(
(
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) ,
) = config_and_inputs
__lowerCAmelCase = {
"""states""": states,
"""actions""": actions,
"""rewards""": rewards,
"""returns_to_go""": returns_to_go,
"""timesteps""": timesteps,
"""attention_mask""": attention_mask,
}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
a : Any =(DecisionTransformerModel,) if is_torch_available() else ()
a : Tuple =()
a : Dict ={"""feature-extraction""": DecisionTransformerModel} if is_torch_available() else {}
# Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
a : List[Any] =False
# Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
a : Dict =False
a : str =False
a : Optional[int] =False
a : int =False
a : Tuple =False
a : List[str] =False
a : Any =False
a : Dict =False
a : List[Any] =False
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = DecisionTransformerModelTester(self )
__lowerCAmelCase = ConfigTester(self,config_class=__SCREAMING_SNAKE_CASE,hidden_size=37 )
def lowerCamelCase__ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
@slow
def lowerCamelCase__ ( self ):
'''simple docstring'''
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = DecisionTransformerModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase = [*signature.parameters.keys()]
__lowerCAmelCase = [
"""states""",
"""actions""",
"""rewards""",
"""returns_to_go""",
"""timesteps""",
"""attention_mask""",
]
self.assertListEqual(arg_names[: len(__SCREAMING_SNAKE_CASE )],__SCREAMING_SNAKE_CASE )
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
@slow
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = 2 # number of steps of autoregressive prediction we will perform
__lowerCAmelCase = 10 # defined by the RL environment, may be normalized
__lowerCAmelCase = DecisionTransformerModel.from_pretrained("""edbeeching/decision-transformer-gym-hopper-expert""" )
__lowerCAmelCase = model.to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = model.config
torch.manual_seed(0 )
__lowerCAmelCase = torch.randn(1,1,config.state_dim ).to(device=__SCREAMING_SNAKE_CASE,dtype=torch.floataa ) # env.reset()
__lowerCAmelCase = torch.tensor(
[[0.24_2793, -0.2869_3074, 0.874_2613], [0.6781_5274, -0.0810_1085, -0.1295_2147]],device=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = torch.tensor(__SCREAMING_SNAKE_CASE,device=__SCREAMING_SNAKE_CASE,dtype=torch.floataa ).reshape(1,1,1 )
__lowerCAmelCase = state
__lowerCAmelCase = torch.zeros(1,0,config.act_dim,device=__SCREAMING_SNAKE_CASE,dtype=torch.floataa )
__lowerCAmelCase = torch.zeros(1,0,device=__SCREAMING_SNAKE_CASE,dtype=torch.floataa )
__lowerCAmelCase = torch.tensor(0,device=__SCREAMING_SNAKE_CASE,dtype=torch.long ).reshape(1,1 )
for step in range(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = torch.cat([actions, torch.zeros(1,1,config.act_dim,device=__SCREAMING_SNAKE_CASE )],dim=1 )
__lowerCAmelCase = torch.cat([rewards, torch.zeros(1,1,device=__SCREAMING_SNAKE_CASE )],dim=1 )
__lowerCAmelCase = torch.ones(1,states.shape[1] ).to(dtype=torch.long,device=states.device )
with torch.no_grad():
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = model(
states=__SCREAMING_SNAKE_CASE,actions=__SCREAMING_SNAKE_CASE,rewards=__SCREAMING_SNAKE_CASE,returns_to_go=__SCREAMING_SNAKE_CASE,timesteps=__SCREAMING_SNAKE_CASE,attention_mask=__SCREAMING_SNAKE_CASE,return_dict=__SCREAMING_SNAKE_CASE,)
self.assertEqual(action_pred.shape,actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1],expected_outputs[step],atol=1e-4 ) )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = ( # env.step(action)
torch.randn(1,1,config.state_dim ).to(device=__SCREAMING_SNAKE_CASE,dtype=torch.floataa ),
1.0,
False,
{},
)
__lowerCAmelCase = action_pred[0, -1]
__lowerCAmelCase = torch.cat([states, state],dim=1 )
__lowerCAmelCase = returns_to_go[0, -1] - reward
__lowerCAmelCase = torch.cat([returns_to_go, pred_return.reshape(1,1,1 )],dim=1 )
__lowerCAmelCase = torch.cat(
[timesteps, torch.ones((1, 1),device=__SCREAMING_SNAKE_CASE,dtype=torch.long ) * (step + 1)],dim=1 )
| 350 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def _lowerCAmelCase ( lowercase=None ) -> Any:
__lowerCAmelCase = argparse.ArgumentParser(add_help=lowercase , allow_abbrev=lowercase )
# The main config parser
__lowerCAmelCase = config_command_parser(lowercase )
# The subparser to add commands to
__lowerCAmelCase = config_parser.add_subparsers(title="""subcommands""" , dest="""subcommand""" )
# Then add other parsers with the parent parser
default_command_parser(lowercase , parents=[parent_parser] )
update_command_parser(lowercase , parents=[parent_parser] )
return config_parser
def _lowerCAmelCase ( ) -> List[Any]:
__lowerCAmelCase = get_config_parser()
__lowerCAmelCase = config_parser.parse_args()
if not hasattr(lowercase , """func""" ):
config_parser.print_help()
exit(1 )
# Run
args.func(lowercase )
if __name__ == "__main__":
main()
| 46 | 0 |
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[int]:
if index == r:
for j in range(SCREAMING_SNAKE_CASE ):
print(data[j] , end=' ' )
print(' ' )
return
# When no more elements are there to put in data[]
if i >= n:
return
# current is included, put next at next location
__lowercase = arr[i]
combination_util(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , index + 1 , SCREAMING_SNAKE_CASE , i + 1 )
# current is excluded, replace it with
# next (Note that i+1 is passed, but
# index is not changed)
combination_util(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[str] ) -> List[Any]:
# A temporary array to store all combination one by one
__lowercase = [0] * r
# Print all combination using temporary array 'data[]'
combination_util(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , 0 , SCREAMING_SNAKE_CASE , 0 )
if __name__ == "__main__":
# Driver code to check the function above
SCREAMING_SNAKE_CASE__ = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
| 325 |
from __future__ import annotations
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : list[list[int]] ) -> int:
# preprocessing the first row
for i in range(1 , len(matrix[0] ) ):
matrix[0][i] += matrix[0][i - 1]
# preprocessing the first column
for i in range(1 , len(SCREAMING_SNAKE_CASE ) ):
matrix[i][0] += matrix[i - 1][0]
# updating the path cost for current position
for i in range(1 , len(SCREAMING_SNAKE_CASE ) ):
for j in range(1 , len(matrix[0] ) ):
matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 325 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : int = logging.get_logger(__name__)
lowercase__ : Union[str, Any] = {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/config.json''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/config.json''',
'''funnel-transformer/medium-base''': '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json''',
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/config.json''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json''',
'''funnel-transformer/xlarge-base''': '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json''',
}
class _UpperCAmelCase ( lowerCAmelCase__):
_lowerCAmelCase : Any = """funnel"""
_lowerCAmelCase : Union[str, Any] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """n_head""",
}
def __init__( self : Dict , lowercase_ : List[Any]=30522 , lowercase_ : Any=[4, 4, 4] , lowercase_ : Any=None , lowercase_ : Optional[int]=2 , lowercase_ : List[Any]=768 , lowercase_ : Tuple=12 , lowercase_ : Any=64 , lowercase_ : Any=3072 , lowercase_ : int="gelu_new" , lowercase_ : List[Any]=0.1 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : List[Any]=0.0 , lowercase_ : Tuple=0.1 , lowercase_ : Tuple=None , lowercase_ : Optional[Any]=1E-9 , lowercase_ : List[Any]="mean" , lowercase_ : Any="relative_shift" , lowercase_ : str=True , lowercase_ : int=True , lowercase_ : Optional[int]=True , **lowercase_ : Optional[Any] , ):
snake_case_ : Union[str, Any] = vocab_size
snake_case_ : Optional[Any] = block_sizes
snake_case_ : Optional[Any] = [1] * len(lowercase_ ) if block_repeats is None else block_repeats
assert len(lowercase_ ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
snake_case_ : Optional[Any] = num_decoder_layers
snake_case_ : str = d_model
snake_case_ : Optional[int] = n_head
snake_case_ : int = d_head
snake_case_ : Union[str, Any] = d_inner
snake_case_ : Union[str, Any] = hidden_act
snake_case_ : Optional[Any] = hidden_dropout
snake_case_ : List[str] = attention_dropout
snake_case_ : Dict = activation_dropout
snake_case_ : Optional[Any] = initializer_range
snake_case_ : Dict = initializer_std
snake_case_ : Any = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
snake_case_ : Any = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
snake_case_ : Any = attention_type
snake_case_ : Dict = separate_cls
snake_case_ : List[Any] = truncate_seq
snake_case_ : List[str] = pool_q_only
super().__init__(**lowercase_ )
@property
def _snake_case ( self : Optional[Any] ):
return sum(self.block_sizes )
@num_hidden_layers.setter
def _snake_case ( self : Optional[int] , lowercase_ : Union[str, Any] ):
raise NotImplementedError(
'''This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.''' )
@property
def _snake_case ( self : List[Any] ):
return len(self.block_sizes )
@num_blocks.setter
def _snake_case ( self : str , lowercase_ : Dict ):
raise NotImplementedError('''This model does not support the setting of `num_blocks`. Please set `block_sizes`.''' )
| 155 |
"""simple docstring"""
from collections.abc import Generator
def __lowercase ( ):
snake_case_, snake_case_ : List[str] = 0, 1
while True:
snake_case_, snake_case_ : List[str] = b, a + b
yield b
def __lowercase ( _a = 1_000 ):
snake_case_ : Tuple = 1
snake_case_ : List[str] = fibonacci_generator()
while len(str(next(_a ) ) ) < n:
answer += 1
return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 155 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
__magic_name__: Optional[Any] = logging.get_logger(__name__)
def UpperCamelCase ( _A ):
"""simple docstring"""
if "resnet-50" in model_name:
__magic_name__ : Dict = ResNetConfig.from_pretrained("""microsoft/resnet-50""" )
elif "resnet-101" in model_name:
__magic_name__ : Union[str, Any] = ResNetConfig.from_pretrained("""microsoft/resnet-101""" )
else:
raise ValueError("""Model name should include either resnet50 or resnet101""" )
__magic_name__ : Union[str, Any] = DetrConfig(use_timm_backbone=_A, backbone_config=_A )
# set label attributes
__magic_name__ : List[str] = """panoptic""" in model_name
if is_panoptic:
__magic_name__ : Tuple = 250
else:
__magic_name__ : Tuple = 91
__magic_name__ : Optional[Any] = """huggingface/label-files"""
__magic_name__ : Optional[Any] = """coco-detection-id2label.json"""
__magic_name__ : Optional[int] = json.load(open(hf_hub_download(_A, _A, repo_type="""dataset""" ), """r""" ) )
__magic_name__ : Optional[int] = {int(_A ): v for k, v in idalabel.items()}
__magic_name__ : List[Any] = idalabel
__magic_name__ : Dict = {v: k for k, v in idalabel.items()}
return config, is_panoptic
def UpperCamelCase ( _A ):
"""simple docstring"""
__magic_name__ : List[str] = []
# stem
# fmt: off
rename_keys.append(("""backbone.0.body.conv1.weight""", """backbone.conv_encoder.model.embedder.embedder.convolution.weight""") )
rename_keys.append(("""backbone.0.body.bn1.weight""", """backbone.conv_encoder.model.embedder.embedder.normalization.weight""") )
rename_keys.append(("""backbone.0.body.bn1.bias""", """backbone.conv_encoder.model.embedder.embedder.normalization.bias""") )
rename_keys.append(("""backbone.0.body.bn1.running_mean""", """backbone.conv_encoder.model.embedder.embedder.normalization.running_mean""") )
rename_keys.append(("""backbone.0.body.bn1.running_var""", """backbone.conv_encoder.model.embedder.embedder.normalization.running_var""") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
f'transformer.encoder.layers.{i}.self_attn.out_proj.weight',
f'encoder.layers.{i}.self_attn.out_proj.weight',
) )
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias') )
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
f'transformer.decoder.layers.{i}.self_attn.out_proj.weight',
f'decoder.layers.{i}.self_attn.out_proj.weight',
) )
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
) )
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
) )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
] )
return rename_keys
def UpperCamelCase ( _A, _A, _A ):
"""simple docstring"""
__magic_name__ : Union[str, Any] = state_dict.pop(_A )
__magic_name__ : List[str] = val
def UpperCamelCase ( _A, _A=False ):
"""simple docstring"""
__magic_name__ : Any = """"""
if is_panoptic:
__magic_name__ : Optional[Any] = """detr."""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
__magic_name__ : str = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' )
__magic_name__ : Optional[Any] = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
__magic_name__ : Any = in_proj_weight[:256, :]
__magic_name__ : Dict = in_proj_bias[:256]
__magic_name__ : Tuple = in_proj_weight[256:512, :]
__magic_name__ : Union[str, Any] = in_proj_bias[256:512]
__magic_name__ : Dict = in_proj_weight[-256:, :]
__magic_name__ : List[Any] = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
__magic_name__ : str = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
__magic_name__ : List[str] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
__magic_name__ : int = in_proj_weight[:256, :]
__magic_name__ : int = in_proj_bias[:256]
__magic_name__ : str = in_proj_weight[256:512, :]
__magic_name__ : Tuple = in_proj_bias[256:512]
__magic_name__ : int = in_proj_weight[-256:, :]
__magic_name__ : Any = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
__magic_name__ : Any = state_dict.pop(
f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight' )
__magic_name__ : int = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
__magic_name__ : Any = in_proj_weight_cross_attn[:256, :]
__magic_name__ : List[str] = in_proj_bias_cross_attn[:256]
__magic_name__ : Union[str, Any] = in_proj_weight_cross_attn[256:512, :]
__magic_name__ : Any = in_proj_bias_cross_attn[256:512]
__magic_name__ : Any = in_proj_weight_cross_attn[-256:, :]
__magic_name__ : List[Any] = in_proj_bias_cross_attn[-256:]
def UpperCamelCase ( ):
"""simple docstring"""
__magic_name__ : Optional[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__magic_name__ : Dict = Image.open(requests.get(_A, stream=_A ).raw )
return im
@torch.no_grad()
def UpperCamelCase ( _A, _A=None, _A=False ):
"""simple docstring"""
__magic_name__ ,__magic_name__ : Optional[int] = get_detr_config(_A )
# load original model from torch hub
__magic_name__ : Optional[Any] = {
"""detr-resnet-50""": """detr_resnet50""",
"""detr-resnet-101""": """detr_resnet101""",
}
logger.info(f'Converting model {model_name}...' )
__magic_name__ : str = torch.hub.load("""facebookresearch/detr""", model_name_to_original_name[model_name], pretrained=_A ).eval()
__magic_name__ : List[Any] = detr.state_dict()
# rename keys
for src, dest in create_rename_keys(_A ):
if is_panoptic:
__magic_name__ : str = """detr.""" + src
rename_key(_A, _A, _A )
# query, key and value matrices need special treatment
read_in_q_k_v(_A, is_panoptic=_A )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
__magic_name__ : List[str] = """detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("""detr""" )
and not key.startswith("""class_labels_classifier""" )
and not key.startswith("""bbox_predictor""" )
):
__magic_name__ : Tuple = state_dict.pop(_A )
__magic_name__ : Optional[Any] = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
__magic_name__ : Dict = state_dict.pop(_A )
__magic_name__ : Union[str, Any] = val
elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
continue
else:
__magic_name__ : str = state_dict.pop(_A )
__magic_name__ : List[Any] = val
else:
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
__magic_name__ : Union[str, Any] = state_dict.pop(_A )
__magic_name__ : Any = val
# finally, create HuggingFace model and load state dict
__magic_name__ : str = DetrForSegmentation(_A ) if is_panoptic else DetrForObjectDetection(_A )
model.load_state_dict(_A )
model.eval()
# verify our conversion on an image
__magic_name__ : Any = """coco_panoptic""" if is_panoptic else """coco_detection"""
__magic_name__ : Optional[Any] = DetrImageProcessor(format=_A )
__magic_name__ : Optional[int] = processor(images=prepare_img(), return_tensors="""pt""" )
__magic_name__ : Dict = encoding["""pixel_values"""]
__magic_name__ : List[Any] = detr(_A )
__magic_name__ : str = model(_A )
assert torch.allclose(outputs.logits, original_outputs["""pred_logits"""], atol=1e-3 )
assert torch.allclose(outputs.pred_boxes, original_outputs["""pred_boxes"""], atol=1e-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks, original_outputs["""pred_masks"""], atol=1e-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(_A ).mkdir(exist_ok=_A )
model.save_pretrained(_A )
processor.save_pretrained(_A )
if push_to_hub:
# Upload model and image processor to the hub
logger.info("""Uploading PyTorch model and image processor to the hub...""" )
model.push_to_hub(f'nielsr/{model_name}' )
processor.push_to_hub(f'nielsr/{model_name}' )
if __name__ == "__main__":
__magic_name__: Dict = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="detr-resnet-50",
type=str,
choices=["detr-resnet-50", "detr-resnet-101"],
help="Name of the DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.")
__magic_name__: int = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 342 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"The `inpainting.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionInpaintPipeline` instead."
)
| 342 | 1 |
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
UpperCamelCase = logging.get_logger(__name__)
class __lowerCamelCase ( __lowercase ):
"""simple docstring"""
def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> None:
warnings.warn(
"The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use DeiTImageProcessor instead." , SCREAMING_SNAKE_CASE__ , )
super().__init__(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
| 363 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = "Speech2TextFeatureExtractor"
snake_case__ = "Speech2TextTokenizer"
def __init__( self : int , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Tuple ) -> Optional[Any]:
super().__init__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = self.feature_extractor
lowerCAmelCase__ = False
def __call__( self : str , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : int ) -> Optional[Any]:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
lowerCAmelCase__ = kwargs.pop("raw_speech" )
else:
lowerCAmelCase__ = kwargs.pop("audio" , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = kwargs.pop("sampling_rate" , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = kwargs.pop("text" , SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) > 0:
lowerCAmelCase__ = args[0]
lowerCAmelCase__ = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if audio is not None:
lowerCAmelCase__ = self.feature_extractor(SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , sampling_rate=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
if text is not None:
lowerCAmelCase__ = self.tokenizer(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
lowerCAmelCase__ = encodings["input_ids"]
return inputs
def a ( self : str , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : int ) -> Union[str, Any]:
return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def a ( self : Dict , *SCREAMING_SNAKE_CASE__ : Optional[int] , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Optional[Any]:
return self.tokenizer.decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
@contextmanager
def a ( self : Union[str, Any] ) -> Any:
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call." )
lowerCAmelCase__ = True
lowerCAmelCase__ = self.tokenizer
yield
lowerCAmelCase__ = self.feature_extractor
lowerCAmelCase__ = False
| 221 | 0 |
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class _snake_case :
def __init__( self : Any ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : List[Any]=None ,SCREAMING_SNAKE_CASE__ : List[Any]=None ,SCREAMING_SNAKE_CASE__ : int=None ,SCREAMING_SNAKE_CASE__ : List[Any]="resnet50" ,SCREAMING_SNAKE_CASE__ : List[str]=3 ,SCREAMING_SNAKE_CASE__ : Dict=32 ,SCREAMING_SNAKE_CASE__ : Any=3 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=True ,SCREAMING_SNAKE_CASE__ : Dict=True ,):
SCREAMING_SNAKE_CASE:Union[str, Any] = parent
SCREAMING_SNAKE_CASE:List[Any] = out_indices if out_indices is not None else [4]
SCREAMING_SNAKE_CASE:Tuple = stage_names
SCREAMING_SNAKE_CASE:List[Any] = out_features
SCREAMING_SNAKE_CASE:Union[str, Any] = backbone
SCREAMING_SNAKE_CASE:Any = batch_size
SCREAMING_SNAKE_CASE:Any = image_size
SCREAMING_SNAKE_CASE:Dict = num_channels
SCREAMING_SNAKE_CASE:Dict = use_pretrained_backbone
SCREAMING_SNAKE_CASE:Tuple = is_training
def __UpperCamelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE:Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE:List[str] = self.get_config()
return config, pixel_values
def __UpperCamelCase ( self : Union[str, Any] ):
return TimmBackboneConfig(
image_size=self.image_size ,num_channels=self.num_channels ,out_features=self.out_features ,out_indices=self.out_indices ,stage_names=self.stage_names ,use_pretrained_backbone=self.use_pretrained_backbone ,backbone=self.backbone ,)
def __UpperCamelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Any ):
SCREAMING_SNAKE_CASE:List[str] = TimmBackbone(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE:str = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(
result.feature_map[-1].shape ,(self.batch_size, model.channels[-1], 14, 14) ,)
def __UpperCamelCase ( self : int ):
SCREAMING_SNAKE_CASE:Union[str, Any] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:Tuple = config_and_inputs
SCREAMING_SNAKE_CASE:Optional[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class _snake_case ( _a , _a , _a , unittest.TestCase ):
_A : int = (TimmBackbone,) if is_torch_available() else ()
_A : Optional[Any] = {'''feature-extraction''': TimmBackbone} if is_torch_available() else {}
_A : str = False
_A : Optional[Any] = False
_A : int = False
_A : Tuple = False
def __UpperCamelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE:Optional[int] = TimmBackboneModelTester(self )
SCREAMING_SNAKE_CASE:Union[str, Any] = ConfigTester(self ,config_class=SCREAMING_SNAKE_CASE__ ,has_text_modality=SCREAMING_SNAKE_CASE__ )
def __UpperCamelCase ( self : Any ):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCamelCase ( self : Dict ):
SCREAMING_SNAKE_CASE:Any = "resnet18"
SCREAMING_SNAKE_CASE:Any = "microsoft/resnet-18"
SCREAMING_SNAKE_CASE:List[str] = AutoBackbone.from_pretrained(SCREAMING_SNAKE_CASE__ ,use_timm_backbone=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Optional[int] = AutoBackbone.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertEqual(len(timm_model.out_features ) ,len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) ,len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels ,transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices ,(-1,) )
self.assertEqual(transformers_model.out_indices ,[len(timm_model.stage_names ) - 1] )
SCREAMING_SNAKE_CASE:str = AutoBackbone.from_pretrained(SCREAMING_SNAKE_CASE__ ,use_timm_backbone=SCREAMING_SNAKE_CASE__ ,out_indices=[1, 2, 3] )
SCREAMING_SNAKE_CASE:Union[str, Any] = AutoBackbone.from_pretrained(SCREAMING_SNAKE_CASE__ ,out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices ,transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) ,len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels ,transformers_model.channels )
@unittest.skip("TimmBackbone doesn't support feed forward chunking" )
def __UpperCamelCase ( self : List[Any] ):
pass
@unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute" )
def __UpperCamelCase ( self : Any ):
pass
@unittest.skip("TimmBackbone initialization is managed on the timm side" )
def __UpperCamelCase ( self : Optional[Any] ):
pass
@unittest.skip("TimmBackbone models doesn't have inputs_embeds" )
def __UpperCamelCase ( self : int ):
pass
@unittest.skip("TimmBackbone models doesn't have inputs_embeds" )
def __UpperCamelCase ( self : List[str] ):
pass
@unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint" )
def __UpperCamelCase ( self : str ):
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def __UpperCamelCase ( self : Dict ):
pass
@unittest.skip("model weights aren't tied in TimmBackbone." )
def __UpperCamelCase ( self : str ):
pass
@unittest.skip("model weights aren't tied in TimmBackbone." )
def __UpperCamelCase ( self : Dict ):
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def __UpperCamelCase ( self : Optional[int] ):
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def __UpperCamelCase ( self : Tuple ):
pass
@unittest.skip("TimmBackbone doesn't have hidden size info in its configuration." )
def __UpperCamelCase ( self : List[Any] ):
pass
@unittest.skip("TimmBackbone doesn't support output_attentions." )
def __UpperCamelCase ( self : Union[str, Any] ):
pass
@unittest.skip("Safetensors is not supported by timm." )
def __UpperCamelCase ( self : Optional[int] ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __UpperCamelCase ( self : str ):
pass
def __UpperCamelCase ( self : Dict ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE:int = model_class(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE:str = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE:Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1] ,SCREAMING_SNAKE_CASE__ )
def __UpperCamelCase ( self : List[Any] ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:int = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE:Union[str, Any] = True
SCREAMING_SNAKE_CASE:Dict = self.has_attentions
# no need to test all models as different heads yield the same functionality
SCREAMING_SNAKE_CASE:Optional[int] = self.all_model_classes[0]
SCREAMING_SNAKE_CASE:str = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:int = self._prepare_for_class(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Union[str, Any] = model(**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:str = outputs[0][-1]
# Encoder-/Decoder-only models
SCREAMING_SNAKE_CASE:Any = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
SCREAMING_SNAKE_CASE:int = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def __UpperCamelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE:str = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
SCREAMING_SNAKE_CASE:Union[str, Any] = model(**SCREAMING_SNAKE_CASE__ )
self.assertEqual(len(result.feature_maps ) ,len(config.out_indices ) )
self.assertEqual(len(model.channels ) ,len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
SCREAMING_SNAKE_CASE:Any = copy.deepcopy(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Optional[int] = None
SCREAMING_SNAKE_CASE:int = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
SCREAMING_SNAKE_CASE:List[Any] = model(**SCREAMING_SNAKE_CASE__ )
self.assertEqual(len(result.feature_maps ) ,1 )
self.assertEqual(len(model.channels ) ,1 )
# Check backbone can be initialized with fresh weights
SCREAMING_SNAKE_CASE:Union[str, Any] = copy.deepcopy(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:int = False
SCREAMING_SNAKE_CASE:Optional[Any] = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
SCREAMING_SNAKE_CASE:Dict = model(**SCREAMING_SNAKE_CASE__ )
| 139 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
A_ = logging.get_logger(__name__)
class _snake_case ( _a ):
def __init__( self : Optional[int] ,*SCREAMING_SNAKE_CASE__ : Union[str, Any] ,**SCREAMING_SNAKE_CASE__ : str ):
warnings.warn(
"The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use DonutImageProcessor instead." ,SCREAMING_SNAKE_CASE__ ,)
super().__init__(*SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ )
| 139 | 1 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=UpperCAmelCase__ )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
lowerCAmelCase : str = field(default="summarization" , metadata={"include_in_asdict_even_if_is_default": True} )
lowerCAmelCase : ClassVar[Features] = Features({"text": Value("string" )} )
lowerCAmelCase : ClassVar[Features] = Features({"summary": Value("string" )} )
lowerCAmelCase : str = "text"
lowerCAmelCase : str = "summary"
@property
def lowerCAmelCase__ ( self : Tuple ) ->Dict[str, str]:
'''simple docstring'''
return {self.text_column: "text", self.summary_column: "summary"}
| 322 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'microsoft/resnet-50': 'https://huggingface.co/microsoft/resnet-50/blob/main/config.json',
}
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ ):
lowerCAmelCase : int = "resnet"
lowerCAmelCase : Union[str, Any] = ["basic", "bottleneck"]
def __init__( self : Dict , lowerCamelCase__ : Tuple=3 , lowerCamelCase__ : Any=64 , lowerCamelCase__ : Optional[int]=[2_56, 5_12, 10_24, 20_48] , lowerCamelCase__ : int=[3, 4, 6, 3] , lowerCamelCase__ : Dict="bottleneck" , lowerCamelCase__ : Dict="relu" , lowerCamelCase__ : List[Any]=False , lowerCamelCase__ : Any=None , lowerCamelCase__ : int=None , **lowerCamelCase__ : Tuple , ) ->List[str]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
if layer_type not in self.layer_types:
raise ValueError(F"""layer_type={layer_type} is not one of {','.join(self.layer_types )}""" )
_UpperCAmelCase : str = num_channels
_UpperCAmelCase : List[str] = embedding_size
_UpperCAmelCase : Tuple = hidden_sizes
_UpperCAmelCase : Dict = depths
_UpperCAmelCase : List[Any] = layer_type
_UpperCAmelCase : Optional[int] = hidden_act
_UpperCAmelCase : Tuple = downsample_in_first_stage
_UpperCAmelCase : str = ["stem"] + [F"""stage{idx}""" for idx in range(1 , len(lowerCamelCase__ ) + 1 )]
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = get_aligned_output_features_output_indices(
out_features=lowerCamelCase__ , out_indices=lowerCamelCase__ , stage_names=self.stage_names )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : Optional[Any] = version.parse("1.11" )
@property
def lowerCAmelCase__ ( self : Optional[Any] ) ->Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def lowerCAmelCase__ ( self : str ) ->float:
'''simple docstring'''
return 1E-3
| 322 | 1 |
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class __lowerCAmelCase :
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=1_3 , lowerCAmelCase__=7 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=9_9 , lowerCAmelCase__=3_2 , lowerCAmelCase__=5 , lowerCAmelCase__=4 , lowerCAmelCase__=4 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.1 , lowerCAmelCase__=True , lowerCAmelCase__=5_1_2 , lowerCAmelCase__=1_6 , lowerCAmelCase__=2 , lowerCAmelCase__=0.02 , lowerCAmelCase__=3 , lowerCAmelCase__=4 , lowerCAmelCase__=None , ) -> Any:
'''simple docstring'''
a__ : str =parent
a__ : Dict =batch_size
a__ : List[str] =seq_length
a__ : Any =is_training
a__ : Tuple =use_input_mask
a__ : List[str] =use_token_type_ids
a__ : Union[str, Any] =use_labels
a__ : Optional[int] =vocab_size
a__ : int =hidden_size
a__ : int =num_hidden_layers
a__ : List[Any] =num_attention_heads
a__ : str =intermediate_multiple_size
a__ : List[str] =hidden_act
a__ : Optional[int] =hidden_dropout
a__ : List[str] =attention_dropout
a__ : int =weight_tying
a__ : Optional[Any] =max_position_embeddings
a__ : Any =type_vocab_size
a__ : Optional[int] =type_sequence_label_size
a__ : Optional[Any] =initializer_range
a__ : Dict =num_labels
a__ : List[str] =num_choices
a__ : Union[str, Any] =scope
def _lowercase ( self ) -> Dict:
'''simple docstring'''
a__ : int =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a__ : Optional[int] =None
if self.use_input_mask:
a__ : List[str] =random_attention_mask([self.batch_size, self.seq_length] )
a__ : Dict =None
if self.use_labels:
a__ : str =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a__ : Any =self.get_config()
return config, input_ids, input_mask, token_labels
def _lowercase ( self ) -> Dict:
'''simple docstring'''
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , )
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
a__ , a__ , a__ , a__ : Tuple =self.prepare_config_and_inputs()
a__ : List[str] =True
return config, input_ids, input_mask, token_labels
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
'''simple docstring'''
a__ : Any =GPTNeoXJapaneseModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
a__ : List[Any] =model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
a__ : Union[str, Any] =model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
'''simple docstring'''
a__ : Optional[int] =True
a__ : Dict =GPTNeoXJapaneseModel(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
a__ : Optional[int] =model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]:
'''simple docstring'''
a__ : Optional[int] =GPTNeoXJapaneseForCausalLM(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
a__ : Optional[Any] =model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]:
'''simple docstring'''
a__ : int =True
a__ : str =GPTNeoXJapaneseForCausalLM(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
# first forward pass
a__ : Any =model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , use_cache=lowerCAmelCase__ )
a__ : List[str] =outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
a__ : Tuple =ids_tensor((self.batch_size, 3) , config.vocab_size )
a__ : List[Any] =ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
a__ : List[str] =torch.cat([input_ids, next_tokens] , dim=-1 )
a__ : List[str] =torch.cat([input_mask, next_mask] , dim=-1 )
a__ : int =model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ )
a__ : Dict =output_from_no_past["hidden_states"][0]
a__ : Any =model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , past_key_values=lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , )["hidden_states"][0]
# select random slice
a__ : List[str] =ids_tensor((1,) , output_from_past.shape[-1] ).item()
a__ : List[Any] =output_from_no_past[:, -3:, random_slice_idx].detach()
a__ : Any =output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
a__ : List[Any] =self.prepare_config_and_inputs()
a__ , a__ , a__ , a__ : int =config_and_inputs
a__ : Optional[Any] ={"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase):
_lowercase : Optional[int] = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
_lowercase : List[str] = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
_lowercase : Optional[int] = (
{"""feature-extraction""": GPTNeoXJapaneseModel, """text-generation""": GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
_lowercase : int = False
_lowercase : Optional[Any] = False
_lowercase : Tuple = False
_lowercase : int = False
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
a__ : Optional[Any] =GPTNeoXJapaneseModelTester(self )
a__ : Union[str, Any] =ConfigTester(self , config_class=lowerCAmelCase__ , hidden_size=3_7 )
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
a__ , a__ , a__ , a__ : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def _lowercase ( self ) -> Any:
'''simple docstring'''
a__ , a__ , a__ , a__ : List[str] =self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
a__ , a__ , a__ , a__ : Optional[int] =self.model_tester.prepare_config_and_inputs_for_decoder()
a__ : str =None
self.model_tester.create_and_check_model_as_decoder(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
a__ , a__ , a__ , a__ : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
a__ : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*lowerCAmelCase__ )
@slow
def _lowercase ( self ) -> int:
'''simple docstring'''
a__ : Tuple ="abeja/gpt-neox-japanese-2.7b"
a__ : Any =["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
a__ : Dict =[
"データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
"100年後に必要とされる会社は、「人」が中心の会社です。",
"フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
"国境の長いトンネルを抜けると、そこは雪国だった。",
"美味しい日本食といえば、やっぱりお寿司ですよね。",
]
a__ : int =GPTNeoXJapaneseTokenizer.from_pretrained(lowerCAmelCase__ )
a__ : Dict =GPTNeoXJapaneseForCausalLM.from_pretrained(lowerCAmelCase__ )
a__ : List[str] =[]
for prompt in prompts:
a__ : List[str] =tokenizer(lowerCAmelCase__ , return_tensors="pt" ).input_ids
a__ : int =model.generate(lowerCAmelCase__ , max_length=5_0 )
a__ : Dict =tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
predicted_outputs += generated_string
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
| 95 |
import math
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int ) -> bool:
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
__lowercase = range(3 , int(math.sqrt(SCREAMING_SNAKE_CASE ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Tuple=1 , **SCREAMING_SNAKE_CASE : Tuple ) -> Dict:
__lowercase = factor * value
__lowercase = value
while not is_prime(SCREAMING_SNAKE_CASE ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 , **SCREAMING_SNAKE_CASE )
return value
| 325 | 0 |
import os
def A_ ( ) -> Any:
a__ : Tuple = os.path.dirname(os.path.realpath(A__ ) )
a__ : List[str] = os.path.join(A__ , 'triangle.txt' )
with open(A__ ) as f:
a__ : Union[str, Any] = f.readlines()
a__ : List[str] = []
for line in triangle:
a__ : str = []
for number in line.strip().split(' ' ):
numbers_from_line.append(int(A__ ) )
a.append(A__ )
for i in range(1 , len(A__ ) ):
for j in range(len(a[i] ) ):
a__ : int = a[i - 1][j] if j != len(a[i - 1] ) else 0
a__ : str = a[i - 1][j - 1] if j > 0 else 0
a[i][j] += max(A__ , A__ )
return max(a[-1] )
if __name__ == "__main__":
print(solution())
| 225 |
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
lowercase : Union[str, Any] = logging.getLogger(__name__)
@dataclass
class A__ :
"""simple docstring"""
__A : str = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
__A : Optional[str] = field(
default=__UpperCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
__A : Optional[str] = field(
default='''NER''' , metadata={'''help''': '''Task type to fine tune in training (e.g. NER, POS, etc)'''} )
__A : Optional[str] = field(
default=__UpperCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
__A : bool = field(default=__UpperCAmelCase , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
__A : Optional[str] = field(
default=__UpperCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class A__ :
"""simple docstring"""
__A : str = field(
metadata={'''help''': '''The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'''} )
__A : Optional[str] = field(
default=__UpperCAmelCase , metadata={'''help''': '''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'''} , )
__A : int = field(
default=1_2_8 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
__A : bool = field(
default=__UpperCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def A_ ( ) -> Dict:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
a__ : List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
a__ , a__ , a__ : int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
a__ , a__ , a__ : List[Any] = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
' --overwrite_output_dir to overcome.' )
a__ : Optional[Any] = import_module('tasks' )
try:
a__ : List[Any] = getattr(A__ , model_args.task_type )
a__ : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F'Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '
F'Available tasks classes are: {TokenClassificationTask.__subclasses__()}' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , A__ )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
a__ : Tuple = token_classification_task.get_labels(data_args.labels )
a__ : Dict[int, str] = dict(enumerate(A__ ) )
a__ : Union[str, Any] = len(A__ )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
a__ : Optional[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=A__ , idalabel=A__ , labelaid={label: i for i, label in enumerate(A__ )} , cache_dir=model_args.cache_dir , )
a__ : str = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
a__ : List[Any] = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=A__ , cache_dir=model_args.cache_dir , )
# Get datasets
a__ : int = (
TokenClassificationDataset(
token_classification_task=A__ , data_dir=data_args.data_dir , tokenizer=A__ , labels=A__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
a__ : Optional[int] = (
TokenClassificationDataset(
token_classification_task=A__ , data_dir=data_args.data_dir , tokenizer=A__ , labels=A__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(A__ , A__ ) -> Tuple[List[int], List[int]]:
a__ : Union[str, Any] = np.argmax(A__ , axis=2 )
a__ , a__ : Dict = preds.shape
a__ : Union[str, Any] = [[] for _ in range(A__ )]
a__ : Optional[int] = [[] for _ in range(A__ )]
for i in range(A__ ):
for j in range(A__ ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
def compute_metrics(A__ ) -> Dict:
a__ , a__ : Union[str, Any] = align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(A__ , A__ ),
"precision": precision_score(A__ , A__ ),
"recall": recall_score(A__ , A__ ),
"f1": fa_score(A__ , A__ ),
}
# Data collator
a__ : Union[str, Any] = DataCollatorWithPadding(A__ , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
a__ : List[str] = Trainer(
model=A__ , args=A__ , train_dataset=A__ , eval_dataset=A__ , compute_metrics=A__ , data_collator=A__ , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
a__ : Any = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
a__ : Optional[Any] = trainer.evaluate()
a__ : List[Any] = os.path.join(training_args.output_dir , 'eval_results.txt' )
if trainer.is_world_process_zero():
with open(A__ , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(' %s = %s' , A__ , A__ )
writer.write('%s = %s\n' % (key, value) )
results.update(A__ )
# Predict
if training_args.do_predict:
a__ : Optional[Any] = TokenClassificationDataset(
token_classification_task=A__ , data_dir=data_args.data_dir , tokenizer=A__ , labels=A__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
a__ , a__ , a__ : Any = trainer.predict(A__ )
a__ , a__ : Union[str, Any] = align_predictions(A__ , A__ )
a__ : Optional[int] = os.path.join(training_args.output_dir , 'test_results.txt' )
if trainer.is_world_process_zero():
with open(A__ , 'w' ) as writer:
for key, value in metrics.items():
logger.info(' %s = %s' , A__ , A__ )
writer.write('%s = %s\n' % (key, value) )
# Save predictions
a__ : Tuple = os.path.join(training_args.output_dir , 'test_predictions.txt' )
if trainer.is_world_process_zero():
with open(A__ , 'w' ) as writer:
with open(os.path.join(data_args.data_dir , 'test.txt' ) , 'r' ) as f:
token_classification_task.write_predictions_to_file(A__ , A__ , A__ )
return results
def A_ ( A__ ) -> Tuple:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 225 | 1 |
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
__lowerCAmelCase : Any = logging.get_logger(__name__)
class snake_case__ (enum.Enum ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = 0
SCREAMING_SNAKE_CASE_ : Optional[Any] = 1
@add_end_docstrings(_UpperCamelCase )
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = """generated"""
def __init__( self : Dict , *__lowerCamelCase : List[Any] , **__lowerCamelCase : Union[str, Any] ) -> List[Any]:
super().__init__(*__lowerCamelCase , **__lowerCamelCase )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == "tf"
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : List[str]=None , __lowerCamelCase : List[str]=None , __lowerCamelCase : Any=None , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : str=None , **__lowerCamelCase : str , ) -> Tuple:
a = {}
if truncation is not None:
a = truncation
a = generate_kwargs
a = {}
if return_tensors is not None and return_type is None:
a = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
a = return_type
if clean_up_tokenization_spaces is not None:
a = clean_up_tokenization_spaces
if stop_sequence is not None:
a = self.tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
if len(__lowerCamelCase ) > 1:
warnings.warn(
"Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
" the stop sequence will be used as the stop sequence string in the interim." )
a = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int ) -> Union[str, Any]:
return True
def __UpperCAmelCase ( self : Optional[int] , *__lowerCamelCase : str , __lowerCamelCase : Optional[int] ) -> Union[str, Any]:
a = self.model.config.prefix if self.model.config.prefix is not None else ""
if isinstance(args[0] , __lowerCamelCase ):
if self.tokenizer.pad_token_id is None:
raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input" )
a = ([prefix + arg for arg in args[0]],)
a = True
elif isinstance(args[0] , __lowerCamelCase ):
a = (prefix + args[0],)
a = False
else:
raise ValueError(
f""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`""" )
a = self.tokenizer(*__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self : Any , *__lowerCamelCase : List[Any] , **__lowerCamelCase : int ) -> Optional[int]:
a = super().__call__(*__lowerCamelCase , **__lowerCamelCase )
if (
isinstance(args[0] , __lowerCamelCase )
and all(isinstance(__lowerCamelCase , __lowerCamelCase ) for el in args[0] )
and all(len(__lowerCamelCase ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def __UpperCAmelCase ( self : str , __lowerCamelCase : Any , __lowerCamelCase : List[Any]=TruncationStrategy.DO_NOT_TRUNCATE , **__lowerCamelCase : str ) -> Optional[int]:
a = self._parse_and_tokenize(__lowerCamelCase , truncation=__lowerCamelCase , **__lowerCamelCase )
return inputs
def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Optional[int] , **__lowerCamelCase : Dict ) -> Optional[int]:
if self.framework == "pt":
a , a = model_inputs["input_ids"].shape
elif self.framework == "tf":
a , a = tf.shape(model_inputs["input_ids"] ).numpy()
a = generate_kwargs.get("min_length" , self.model.config.min_length )
a = generate_kwargs.get("max_length" , self.model.config.max_length )
self.check_inputs(__lowerCamelCase , generate_kwargs["min_length"] , generate_kwargs["max_length"] )
a = self.model.generate(**__lowerCamelCase , **__lowerCamelCase )
a = output_ids.shape[0]
if self.framework == "pt":
a = output_ids.reshape(__lowerCamelCase , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
a = tf.reshape(__lowerCamelCase , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any]=ReturnType.TEXT , __lowerCamelCase : List[Any]=False ) -> int:
a = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
a = {f"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
a = {
f"""{self.return_name}_text""": self.tokenizer.decode(
__lowerCamelCase , skip_special_tokens=__lowerCamelCase , clean_up_tokenization_spaces=__lowerCamelCase , )
}
records.append(__lowerCamelCase )
return records
@add_end_docstrings(_UpperCamelCase )
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = """summary"""
def __call__( self : Dict , *__lowerCamelCase : List[Any] , **__lowerCamelCase : str ) -> Optional[Any]:
return super().__call__(*__lowerCamelCase , **__lowerCamelCase )
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int ) -> bool:
if max_length < min_length:
logger.warning(f"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
"a summarization task, where outputs shorter than the input are typically wanted, you might "
f"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(_UpperCamelCase )
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = """translation"""
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int ) -> Dict:
if input_length > 0.9 * max_length:
logger.warning(
f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
"increasing your max_length manually, e.g. translator('...', max_length=400)" )
return True
def __UpperCAmelCase ( self : Union[str, Any] , *__lowerCamelCase : Optional[int] , __lowerCamelCase : Any=TruncationStrategy.DO_NOT_TRUNCATE , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : str=None ) -> Optional[Any]:
if getattr(self.tokenizer , "_build_translation_inputs" , __lowerCamelCase ):
return self.tokenizer._build_translation_inputs(
*__lowerCamelCase , return_tensors=self.framework , truncation=__lowerCamelCase , src_lang=__lowerCamelCase , tgt_lang=__lowerCamelCase )
else:
return super()._parse_and_tokenize(*__lowerCamelCase , truncation=__lowerCamelCase )
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : Any=None , __lowerCamelCase : str=None , **__lowerCamelCase : Union[str, Any] ) -> List[Any]:
a , a , a = super()._sanitize_parameters(**__lowerCamelCase )
if src_lang is not None:
a = src_lang
if tgt_lang is not None:
a = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
a = kwargs.get("task" , self.task )
a = task.split("_" )
if task and len(__lowerCamelCase ) == 4:
# translation, XX, to YY
a = items[1]
a = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self : List[str] , *__lowerCamelCase : int , **__lowerCamelCase : List[str] ) -> Dict:
return super().__call__(*__lowerCamelCase , **__lowerCamelCase )
| 107 |
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
__lowerCAmelCase : List[Any] = logging.get_logger(__name__)
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
def __init__( self : Union[str, Any] , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : Dict ) -> None:
warnings.warn(
"The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use DonutImageProcessor instead." , __lowerCamelCase , )
super().__init__(*__lowerCamelCase , **__lowerCamelCase )
| 107 | 1 |
from PIL import Image
def __A ( _lowercase , _lowercase ):
'''simple docstring'''
_A = (2_59 * (level + 2_55)) / (2_55 * (2_59 - level))
def contrast(_lowercase ) -> int:
return int(1_28 + factor * (c - 1_28) )
return img.point(_lowercase )
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change contrast to 170
__A = change_contrast(img, 170)
cont_img.save('image_data/lena_high_contrast.png', format='png')
| 75 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
'weiweishi/roc-bert-base-zh': 'https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json',
}
class SCREAMING_SNAKE_CASE ( snake_case ):
"""simple docstring"""
A_ = "roc_bert"
def __init__( self: Dict , __A: Optional[Any]=3_05_22 , __A: Dict=7_68 , __A: Optional[Any]=12 , __A: int=12 , __A: List[Any]=30_72 , __A: int="gelu" , __A: Optional[Any]=0.1 , __A: Tuple=0.1 , __A: int=5_12 , __A: Optional[Any]=2 , __A: Optional[Any]=0.02 , __A: Any=1e-12 , __A: Optional[Any]=True , __A: int=0 , __A: Union[str, Any]="absolute" , __A: Tuple=None , __A: Dict=True , __A: List[str]=True , __A: Union[str, Any]=7_68 , __A: List[str]=9_10 , __A: Any=5_12 , __A: List[Any]=2_48_58 , __A: int=True , **__A: str , ) -> Tuple:
_A = vocab_size
_A = max_position_embeddings
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = initializer_range
_A = type_vocab_size
_A = layer_norm_eps
_A = use_cache
_A = enable_pronunciation
_A = enable_shape
_A = pronunciation_embed_dim
_A = pronunciation_vocab_size
_A = shape_embed_dim
_A = shape_vocab_size
_A = concat_input
_A = position_embedding_type
_A = classifier_dropout
super().__init__(pad_token_id=__A , **__A )
| 75 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class __A :
'''simple docstring'''
def __init__( self : int ,_snake_case : Union[str, Any] ,_snake_case : Tuple=12 ,_snake_case : Optional[int]=7 ,_snake_case : List[Any]=True ,_snake_case : int=True ,_snake_case : Optional[int]=True ,_snake_case : Union[str, Any]=99 ,_snake_case : int=32 ,_snake_case : int=32 ,_snake_case : List[Any]=2 ,_snake_case : int=4 ,_snake_case : Union[str, Any]=37 ,_snake_case : Dict=0.1 ,_snake_case : int=0.1 ,_snake_case : int=512 ,_snake_case : Tuple=0.02 ,_snake_case : Optional[Any]=0 ,_snake_case : Union[str, Any]=None ,) -> Optional[int]:
"""simple docstring"""
lowercase__ : List[str] = parent
lowercase__ : str = batch_size
lowercase__ : Optional[int] = seq_length
lowercase__ : Optional[Any] = is_training
lowercase__ : Dict = use_input_mask
lowercase__ : Union[str, Any] = use_labels
lowercase__ : str = vocab_size
lowercase__ : Optional[int] = hidden_size
lowercase__ : Optional[Any] = projection_dim
lowercase__ : str = num_hidden_layers
lowercase__ : Any = num_attention_heads
lowercase__ : List[str] = intermediate_size
lowercase__ : Union[str, Any] = dropout
lowercase__ : Optional[int] = attention_dropout
lowercase__ : List[str] = max_position_embeddings
lowercase__ : Tuple = initializer_range
lowercase__ : Union[str, Any] = scope
lowercase__ : Union[str, Any] = bos_token_id
def UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : int = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
lowercase__ : Any = None
if self.use_input_mask:
lowercase__ : int = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
lowercase__ : Any = input_mask.numpy()
lowercase__ , lowercase__ : List[str] = input_mask.shape
lowercase__ : Tuple = np.random.randint(1 ,seq_length - 1 ,size=(batch_size,) )
for batch_idx, start_index in enumerate(_snake_case ):
lowercase__ : Union[str, Any] = 1
lowercase__ : Any = 0
lowercase__ : str = self.get_config()
return config, input_ids, tf.convert_to_tensor(_snake_case )
def UpperCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
return BlipTextConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,projection_dim=self.projection_dim ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,dropout=self.dropout ,attention_dropout=self.attention_dropout ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,bos_token_id=self.bos_token_id ,)
def UpperCAmelCase ( self : List[Any] ,_snake_case : Dict ,_snake_case : List[Any] ,_snake_case : List[Any] ) -> Dict:
"""simple docstring"""
lowercase__ : Optional[Any] = TFBlipTextModel(config=_snake_case )
lowercase__ : List[Any] = model(_snake_case ,attention_mask=_snake_case ,training=_snake_case )
lowercase__ : List[Any] = model(_snake_case ,training=_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ : List[str] = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Any = config_and_inputs
lowercase__ : List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class __A ( A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : int = (TFBlipTextModel,) if is_tf_available() else ()
lowerCAmelCase : Optional[Any] = False
lowerCAmelCase : List[Any] = False
lowerCAmelCase : Union[str, Any] = False
def UpperCAmelCase ( self : Any ) -> Any:
"""simple docstring"""
lowercase__ : Optional[Any] = BlipTextModelTester(self )
lowercase__ : List[Any] = ConfigTester(self ,config_class=_snake_case ,hidden_size=37 )
def UpperCAmelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCAmelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
pass
def UpperCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason='''Blip does not use inputs_embeds''' )
def UpperCAmelCase ( self : int ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def UpperCAmelCase ( self : Optional[int] ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def UpperCAmelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
pass
@slow
def UpperCAmelCase ( self : int ) -> List[Any]:
"""simple docstring"""
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : str = TFBlipTextModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def UpperCAmelCase ( self : Dict ,_snake_case : List[str]=True ) -> Optional[int]:
"""simple docstring"""
super().test_pt_tf_model_equivalence(allow_missing_keys=_snake_case )
| 16 |
"""simple docstring"""
import os
def __UpperCAmelCase ( ) -> int:
with open(os.path.dirname(__lowerCamelCase ) + '''/p022_names.txt''' ) as file:
lowercase__ : List[Any] = str(file.readlines()[0] )
lowercase__ : Dict = names.replace('''"''' , '''''' ).split(''',''' )
names.sort()
lowercase__ : int = 0
lowercase__ : Optional[Any] = 0
for i, name in enumerate(__lowerCamelCase ):
for letter in name:
name_score += ord(__lowerCamelCase ) - 64
total_score += (i + 1) * name_score
lowercase__ : List[str] = 0
return total_score
if __name__ == "__main__":
print(solution())
| 16 | 1 |
"""simple docstring"""
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__UpperCamelCase = logging.get_logger(__name__)
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = ["pixel_values"]
def __init__( self, lowerCAmelCase__ = True, lowerCAmelCase__ = None, lowerCAmelCase__ = PILImageResampling.BICUBIC, lowerCAmelCase__ = True, lowerCAmelCase__ = None, lowerCAmelCase__ = True, lowerCAmelCase__ = 1 / 255, lowerCAmelCase__ = True, lowerCAmelCase__ = IMAGENET_DEFAULT_MEAN, lowerCAmelCase__ = IMAGENET_DEFAULT_STD, **lowerCAmelCase__, ) -> None:
super().__init__(**lowerCAmelCase__)
snake_case_ = size if size is not None else {'shortest_edge': 224}
snake_case_ = get_size_dict(lowerCAmelCase__, default_to_square=lowerCAmelCase__)
snake_case_ = crop_size if crop_size is not None else {'height': 224, 'width': 224}
snake_case_ = get_size_dict(lowerCAmelCase__, param_name='crop_size')
snake_case_ = do_resize
snake_case_ = size
snake_case_ = resample
snake_case_ = do_center_crop
snake_case_ = crop_size
snake_case_ = do_rescale
snake_case_ = rescale_factor
snake_case_ = do_normalize
snake_case_ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
snake_case_ = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = PILImageResampling.BICUBIC, lowerCAmelCase__ = None, **lowerCAmelCase__, ) -> np.ndarray:
snake_case_ = get_size_dict(lowerCAmelCase__, default_to_square=lowerCAmelCase__)
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
snake_case_ = int((256 / 224) * size['shortest_edge'])
snake_case_ = get_resize_output_image_size(lowerCAmelCase__, size=lowerCAmelCase__, default_to_square=lowerCAmelCase__)
snake_case_ = {'height': output_size[0], 'width': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f'Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}')
return resize(
lowerCAmelCase__, size=(size_dict['height'], size_dict['width']), resample=lowerCAmelCase__, data_format=lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = None, **lowerCAmelCase__, ) -> np.ndarray:
snake_case_ = get_size_dict(lowerCAmelCase__)
if "height" not in size or "width" not in size:
raise ValueError(f'Size dict must have keys \'height\' and \'width\'. Got {size.keys()}')
return center_crop(lowerCAmelCase__, size=(size['height'], size['width']), data_format=lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = None, **lowerCAmelCase__, ) -> np.ndarray:
return rescale(lowerCAmelCase__, scale=lowerCAmelCase__, data_format=lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = None, **lowerCAmelCase__, ) -> np.ndarray:
return normalize(lowerCAmelCase__, mean=lowerCAmelCase__, std=lowerCAmelCase__, data_format=lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = ChannelDimension.FIRST, **lowerCAmelCase__, ) -> BatchFeature:
snake_case_ = do_resize if do_resize is not None else self.do_resize
snake_case_ = resample if resample is not None else self.resample
snake_case_ = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case_ = do_rescale if do_rescale is not None else self.do_rescale
snake_case_ = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case_ = do_normalize if do_normalize is not None else self.do_normalize
snake_case_ = image_mean if image_mean is not None else self.image_mean
snake_case_ = image_std if image_std is not None else self.image_std
snake_case_ = size if size is not None else self.size
snake_case_ = get_size_dict(lowerCAmelCase__, default_to_square=lowerCAmelCase__)
snake_case_ = crop_size if crop_size is not None else self.crop_size
snake_case_ = get_size_dict(lowerCAmelCase__, param_name='crop_size')
snake_case_ = make_list_of_images(lowerCAmelCase__)
if not valid_images(lowerCAmelCase__):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
snake_case_ = [to_numpy_array(lowerCAmelCase__) for image in images]
if do_resize:
snake_case_ = [self.resize(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) for image in images]
if do_center_crop:
snake_case_ = [self.center_crop(lowerCAmelCase__, lowerCAmelCase__) for image in images]
if do_rescale:
snake_case_ = [self.rescale(lowerCAmelCase__, lowerCAmelCase__) for image in images]
if do_normalize:
snake_case_ = [self.normalize(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) for image in images]
snake_case_ = [to_channel_dimension_format(lowerCAmelCase__, lowerCAmelCase__) for image in images]
snake_case_ = {'pixel_values': images}
return BatchFeature(data=lowerCAmelCase__, tensor_type=lowerCAmelCase__)
| 312 | """simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = StableDiffusionInpaintPipeline
SCREAMING_SNAKE_CASE_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
SCREAMING_SNAKE_CASE_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
SCREAMING_SNAKE_CASE_ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
SCREAMING_SNAKE_CASE_ = frozenset([] )
def a_ ( self) -> Any:
torch.manual_seed(0)
snake_case_ = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=lowerCAmelCase__, )
snake_case_ = PNDMScheduler(skip_prk_steps=lowerCAmelCase__)
torch.manual_seed(0)
snake_case_ = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, )
torch.manual_seed(0)
snake_case_ = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act='gelu', projection_dim=512, )
snake_case_ = CLIPTextModel(lowerCAmelCase__)
snake_case_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
snake_case_ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=0) -> List[str]:
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
snake_case_ = floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCAmelCase__)).to(lowerCAmelCase__)
snake_case_ = image.cpu().permute(0, 2, 3, 1)[0]
snake_case_ = Image.fromarray(np.uinta(lowerCAmelCase__)).convert('RGB').resize((64, 64))
snake_case_ = Image.fromarray(np.uinta(image + 4)).convert('RGB').resize((64, 64))
if str(lowerCAmelCase__).startswith('mps'):
snake_case_ = torch.manual_seed(lowerCAmelCase__)
else:
snake_case_ = torch.Generator(device=lowerCAmelCase__).manual_seed(lowerCAmelCase__)
snake_case_ = {
'prompt': 'A painting of a squirrel eating a burger',
'image': init_image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def a_ ( self) -> Dict:
snake_case_ = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case_ = self.get_dummy_components()
snake_case_ = StableDiffusionInpaintPipeline(**lowerCAmelCase__)
snake_case_ = sd_pipe.to(lowerCAmelCase__)
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs(lowerCAmelCase__)
snake_case_ = sd_pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case_ = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def a_ ( self) -> Union[str, Any]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
def a_ ( self) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self) -> Union[str, Any]:
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png')
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
snake_case_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench.npy')
snake_case_ = 'stabilityai/stable-diffusion-2-inpainting'
snake_case_ = StableDiffusionInpaintPipeline.from_pretrained(lowerCAmelCase__, safety_checker=lowerCAmelCase__)
pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
pipe.enable_attention_slicing()
snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench'
snake_case_ = torch.manual_seed(0)
snake_case_ = pipe(
prompt=lowerCAmelCase__, image=lowerCAmelCase__, mask_image=lowerCAmelCase__, generator=lowerCAmelCase__, output_type='np', )
snake_case_ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image).max() < 9e-3
def a_ ( self) -> Optional[int]:
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png')
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
snake_case_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench_fp16.npy')
snake_case_ = 'stabilityai/stable-diffusion-2-inpainting'
snake_case_ = StableDiffusionInpaintPipeline.from_pretrained(
lowerCAmelCase__, torch_dtype=torch.floataa, safety_checker=lowerCAmelCase__, )
pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
pipe.enable_attention_slicing()
snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench'
snake_case_ = torch.manual_seed(0)
snake_case_ = pipe(
prompt=lowerCAmelCase__, image=lowerCAmelCase__, mask_image=lowerCAmelCase__, generator=lowerCAmelCase__, output_type='np', )
snake_case_ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image).max() < 5e-1
def a_ ( self) -> Union[str, Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png')
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
snake_case_ = 'stabilityai/stable-diffusion-2-inpainting'
snake_case_ = PNDMScheduler.from_pretrained(lowerCAmelCase__, subfolder='scheduler')
snake_case_ = StableDiffusionInpaintPipeline.from_pretrained(
lowerCAmelCase__, safety_checker=lowerCAmelCase__, scheduler=lowerCAmelCase__, torch_dtype=torch.floataa, )
pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench'
snake_case_ = torch.manual_seed(0)
snake_case_ = pipe(
prompt=lowerCAmelCase__, image=lowerCAmelCase__, mask_image=lowerCAmelCase__, generator=lowerCAmelCase__, num_inference_steps=2, output_type='np', )
snake_case_ = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
| 312 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCamelCase__ ( unittest.TestCase):
'''simple docstring'''
def __init__( self :str , a :Optional[int] , a :List[str]=7 , a :List[str]=3 , a :Dict=1_8 , a :Union[str, Any]=3_0 , a :List[Any]=4_0_0 , a :List[Any]=True , a :int=None , a :Optional[int]=True , ) -> List[str]:
__UpperCamelCase : Optional[int] = size if size is not None else {"height": 1_8, "width": 1_8}
__UpperCamelCase : Optional[int] = parent
__UpperCamelCase : List[str] = batch_size
__UpperCamelCase : List[str] = num_channels
__UpperCamelCase : int = image_size
__UpperCamelCase : int = min_resolution
__UpperCamelCase : Dict = max_resolution
__UpperCamelCase : Tuple = do_resize
__UpperCamelCase : int = size
__UpperCamelCase : List[str] = apply_ocr
def _lowerCamelCase ( self :int ) -> Dict:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class lowerCamelCase__ ( __lowercase , unittest.TestCase):
'''simple docstring'''
_A = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def _lowerCamelCase ( self :Any ) -> Any:
__UpperCamelCase : int = LayoutLMvaImageProcessingTester(self )
@property
def _lowerCamelCase ( self :Tuple ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCamelCase ( self :str ) -> int:
__UpperCamelCase : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , "do_resize" ) )
self.assertTrue(hasattr(a , "size" ) )
self.assertTrue(hasattr(a , "apply_ocr" ) )
def _lowerCamelCase ( self :Dict ) -> List[Any]:
__UpperCamelCase : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 1_8, "width": 1_8} )
__UpperCamelCase : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 )
self.assertEqual(image_processor.size , {"height": 4_2, "width": 4_2} )
def _lowerCamelCase ( self :Optional[int] ) -> Any:
pass
def _lowerCamelCase ( self :int ) -> List[Any]:
# Initialize image_processing
__UpperCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__UpperCamelCase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
__UpperCamelCase : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
self.assertIsInstance(encoding.words , a )
self.assertIsInstance(encoding.boxes , a )
# Test batched
__UpperCamelCase : str = image_processing(a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def _lowerCamelCase ( self :Any ) -> Union[str, Any]:
# Initialize image_processing
__UpperCamelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__UpperCamelCase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
__UpperCamelCase : Dict = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
__UpperCamelCase : List[str] = image_processing(a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def _lowerCamelCase ( self :Tuple ) -> str:
# Initialize image_processing
__UpperCamelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__UpperCamelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
__UpperCamelCase : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
__UpperCamelCase : Tuple = image_processing(a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def _lowerCamelCase ( self :Tuple ) -> str:
# with apply_OCR = True
__UpperCamelCase : Union[str, Any] = LayoutLMvaImageProcessor()
from datasets import load_dataset
__UpperCamelCase : Union[str, Any] = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" )
__UpperCamelCase : Dict = Image.open(ds[0]["file"] ).convert("RGB" )
__UpperCamelCase : List[str] = image_processing(a , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__UpperCamelCase : int = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
__UpperCamelCase : Optional[Any] = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , a )
self.assertListEqual(encoding.boxes , a )
# with apply_OCR = False
__UpperCamelCase : int = LayoutLMvaImageProcessor(apply_ocr=a )
__UpperCamelCase : int = image_processing(a , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) ) | 232 |
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase__ ( __lowercase , unittest.TestCase):
'''simple docstring'''
_A = KandinskyVaaPriorPipeline
_A = ['prompt']
_A = ['prompt', 'negative_prompt']
_A = [
'num_images_per_prompt',
'generator',
'num_inference_steps',
'latents',
'negative_prompt',
'guidance_scale',
'output_type',
'return_dict',
]
_A = False
@property
def _lowerCamelCase ( self :Tuple ) -> Optional[Any]:
return 3_2
@property
def _lowerCamelCase ( self :List[str] ) -> List[str]:
return 3_2
@property
def _lowerCamelCase ( self :Union[str, Any] ) -> Tuple:
return self.time_input_dim
@property
def _lowerCamelCase ( self :Union[str, Any] ) -> Any:
return self.time_input_dim * 4
@property
def _lowerCamelCase ( self :Tuple ) -> str:
return 1_0_0
@property
def _lowerCamelCase ( self :Union[str, Any] ) -> Any:
__UpperCamelCase : int = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def _lowerCamelCase ( self :str ) -> int:
torch.manual_seed(0 )
__UpperCamelCase : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModelWithProjection(a )
@property
def _lowerCamelCase ( self :int ) -> Optional[Any]:
torch.manual_seed(0 )
__UpperCamelCase : Optional[int] = {
"num_attention_heads": 2,
"attention_head_dim": 1_2,
"embedding_dim": self.text_embedder_hidden_size,
"num_layers": 1,
}
__UpperCamelCase : List[Any] = PriorTransformer(**a )
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
__UpperCamelCase : Optional[int] = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def _lowerCamelCase ( self :Dict ) -> Union[str, Any]:
torch.manual_seed(0 )
__UpperCamelCase : Union[str, Any] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=2_2_4 , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1_4 , )
__UpperCamelCase : int = CLIPVisionModelWithProjection(a )
return model
@property
def _lowerCamelCase ( self :Dict ) -> Optional[Any]:
__UpperCamelCase : List[str] = CLIPImageProcessor(
crop_size=2_2_4 , do_center_crop=a , do_normalize=a , do_resize=a , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=2_2_4 , )
return image_processor
def _lowerCamelCase ( self :str ) -> Optional[int]:
__UpperCamelCase : str = self.dummy_prior
__UpperCamelCase : int = self.dummy_image_encoder
__UpperCamelCase : Tuple = self.dummy_text_encoder
__UpperCamelCase : int = self.dummy_tokenizer
__UpperCamelCase : Optional[Any] = self.dummy_image_processor
__UpperCamelCase : int = UnCLIPScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1_0_0_0 , clip_sample=a , clip_sample_range=10.0 , )
__UpperCamelCase : List[Any] = {
"prior": prior,
"image_encoder": image_encoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"scheduler": scheduler,
"image_processor": image_processor,
}
return components
def _lowerCamelCase ( self :Optional[Any] , a :int , a :Union[str, Any]=0 ) -> Any:
if str(a ).startswith("mps" ):
__UpperCamelCase : int = torch.manual_seed(a )
else:
__UpperCamelCase : List[Any] = torch.Generator(device=a ).manual_seed(a )
__UpperCamelCase : int = {
"prompt": "horse",
"generator": generator,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def _lowerCamelCase ( self :List[Any] ) -> Dict:
__UpperCamelCase : int = "cpu"
__UpperCamelCase : List[str] = self.get_dummy_components()
__UpperCamelCase : List[str] = self.pipeline_class(**a )
__UpperCamelCase : Tuple = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
__UpperCamelCase : int = pipe(**self.get_dummy_inputs(a ) )
__UpperCamelCase : int = output.image_embeds
__UpperCamelCase : Optional[int] = pipe(
**self.get_dummy_inputs(a ) , return_dict=a , )[0]
__UpperCamelCase : Union[str, Any] = image[0, -1_0:]
__UpperCamelCase : List[str] = image_from_tuple[0, -1_0:]
assert image.shape == (1, 3_2)
__UpperCamelCase : List[Any] = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def _lowerCamelCase ( self :int ) -> Union[str, Any]:
__UpperCamelCase : str = torch_device == "cpu"
__UpperCamelCase : List[str] = True
__UpperCamelCase : List[Any] = False
self._test_inference_batch_single_identical(
test_max_difference=a , relax_max_difference=a , test_mean_pixel_difference=a , )
@skip_mps
def _lowerCamelCase ( self :Any ) -> int:
__UpperCamelCase : Optional[Any] = torch_device == "cpu"
__UpperCamelCase : Dict = False
self._test_attention_slicing_forward_pass(
test_max_difference=a , test_mean_pixel_difference=a , ) | 232 | 1 |
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
# General docstring
SCREAMING_SNAKE_CASE__ = """RegNetConfig"""
# Base docstring
SCREAMING_SNAKE_CASE__ = """facebook/regnet-y-040"""
SCREAMING_SNAKE_CASE__ = [1, 1_0_8_8, 7, 7]
# Image classification docstring
SCREAMING_SNAKE_CASE__ = """facebook/regnet-y-040"""
SCREAMING_SNAKE_CASE__ = """tabby, tabby cat"""
SCREAMING_SNAKE_CASE__ = [
"""facebook/regnet-y-040""",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class __lowerCamelCase ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase = 3 , UpperCAmelCase = 1 , UpperCAmelCase = 1 , UpperCAmelCase = "relu" , **UpperCAmelCase , ) -> List[str]:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
lowercase_ = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
lowercase_ = tf.keras.layers.ConvaD(
filters=UpperCAmelCase , kernel_size=UpperCAmelCase , strides=UpperCAmelCase , padding="VALID" , groups=UpperCAmelCase , use_bias=UpperCAmelCase , name="convolution" , )
lowercase_ = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name="normalization" )
lowercase_ = ACTaFN[activation] if activation is not None else tf.identity
def A__ ( self , UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
lowercase_ = self.convolution(self.padding(UpperCAmelCase ) )
lowercase_ = self.normalization(UpperCAmelCase )
lowercase_ = self.activation(UpperCAmelCase )
return hidden_state
class __lowerCamelCase ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , **UpperCAmelCase ) -> Tuple:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
lowercase_ = config.num_channels
lowercase_ = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , )
def A__ ( self , UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ = shape_list(UpperCAmelCase )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
lowercase_ = tf.transpose(UpperCAmelCase , perm=(0, 2, 3, 1) )
lowercase_ = self.embedder(UpperCAmelCase )
return hidden_state
class __lowerCamelCase ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase = 2 , **UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
lowercase_ = tf.keras.layers.ConvaD(
filters=UpperCAmelCase , kernel_size=1 , strides=UpperCAmelCase , use_bias=UpperCAmelCase , name="convolution" )
lowercase_ = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name="normalization" )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = False ) -> tf.Tensor:
'''simple docstring'''
return self.normalization(self.convolution(UpperCAmelCase ) , training=UpperCAmelCase )
class __lowerCamelCase ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> Tuple:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
lowercase_ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=UpperCAmelCase , name="pooler" )
lowercase_ = [
tf.keras.layers.ConvaD(filters=UpperCAmelCase , kernel_size=1 , activation="relu" , name="attention.0" ),
tf.keras.layers.ConvaD(filters=UpperCAmelCase , kernel_size=1 , activation="sigmoid" , name="attention.2" ),
]
def A__ ( self , UpperCAmelCase ) -> int:
'''simple docstring'''
lowercase_ = self.pooler(UpperCAmelCase )
for layer_module in self.attention:
lowercase_ = layer_module(UpperCAmelCase )
lowercase_ = hidden_state * pooled
return hidden_state
class __lowerCamelCase ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 1 , **UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
lowercase_ = in_channels != out_channels or stride != 1
lowercase_ = max(1 , out_channels // config.groups_width )
lowercase_ = (
TFRegNetShortCut(UpperCAmelCase , stride=UpperCAmelCase , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
lowercase_ = [
TFRegNetConvLayer(UpperCAmelCase , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
UpperCAmelCase , stride=UpperCAmelCase , groups=UpperCAmelCase , activation=config.hidden_act , name="layer.1" ),
TFRegNetConvLayer(UpperCAmelCase , kernel_size=1 , activation=UpperCAmelCase , name="layer.2" ),
]
lowercase_ = ACTaFN[config.hidden_act]
def A__ ( self , UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = hidden_state
for layer_module in self.layers:
lowercase_ = layer_module(UpperCAmelCase )
lowercase_ = self.shortcut(UpperCAmelCase )
hidden_state += residual
lowercase_ = self.activation(UpperCAmelCase )
return hidden_state
class __lowerCamelCase ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 1 , **UpperCAmelCase ) -> List[str]:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
lowercase_ = in_channels != out_channels or stride != 1
lowercase_ = max(1 , out_channels // config.groups_width )
lowercase_ = (
TFRegNetShortCut(UpperCAmelCase , stride=UpperCAmelCase , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
lowercase_ = [
TFRegNetConvLayer(UpperCAmelCase , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
UpperCAmelCase , stride=UpperCAmelCase , groups=UpperCAmelCase , activation=config.hidden_act , name="layer.1" ),
TFRegNetSELayer(UpperCAmelCase , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ),
TFRegNetConvLayer(UpperCAmelCase , kernel_size=1 , activation=UpperCAmelCase , name="layer.3" ),
]
lowercase_ = ACTaFN[config.hidden_act]
def A__ ( self , UpperCAmelCase ) -> List[str]:
'''simple docstring'''
lowercase_ = hidden_state
for layer_module in self.layers:
lowercase_ = layer_module(UpperCAmelCase )
lowercase_ = self.shortcut(UpperCAmelCase )
hidden_state += residual
lowercase_ = self.activation(UpperCAmelCase )
return hidden_state
class __lowerCamelCase ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 2 , UpperCAmelCase = 2 , **UpperCAmelCase ) -> Dict:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
lowercase_ = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
lowercase_ = [
# downsampling is done in the first layer with stride of 2
layer(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , stride=UpperCAmelCase , name="layers.0" ),
*[layer(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , name=F'layers.{i+1}' ) for i in range(depth - 1 )],
]
def A__ ( self , UpperCAmelCase ) -> int:
'''simple docstring'''
for layer_module in self.layers:
lowercase_ = layer_module(UpperCAmelCase )
return hidden_state
class __lowerCamelCase ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , **UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
lowercase_ = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
UpperCAmelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) )
lowercase_ = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(UpperCAmelCase , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , depth=UpperCAmelCase , name=F'stages.{i+1}' ) )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = False , UpperCAmelCase = True ) -> TFBaseModelOutputWithNoAttention:
'''simple docstring'''
lowercase_ = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowercase_ = hidden_states + (hidden_state,)
lowercase_ = stage_module(UpperCAmelCase )
if output_hidden_states:
lowercase_ = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=UpperCAmelCase , hidden_states=UpperCAmelCase )
@keras_serializable
class __lowerCamelCase ( tf.keras.layers.Layer ):
"""simple docstring"""
lowerCAmelCase__ = RegNetConfig
def __init__( self , UpperCAmelCase , **UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
lowercase_ = config
lowercase_ = TFRegNetEmbeddings(UpperCAmelCase , name="embedder" )
lowercase_ = TFRegNetEncoder(UpperCAmelCase , name="encoder" )
lowercase_ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=UpperCAmelCase , name="pooler" )
@unpack_inputs
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention:
'''simple docstring'''
lowercase_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase_ = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ = self.embedder(UpperCAmelCase , training=UpperCAmelCase )
lowercase_ = self.encoder(
UpperCAmelCase , output_hidden_states=UpperCAmelCase , return_dict=UpperCAmelCase , training=UpperCAmelCase )
lowercase_ = encoder_outputs[0]
lowercase_ = self.pooler(UpperCAmelCase )
# Change to NCHW output format have uniformity in the modules
lowercase_ = tf.transpose(UpperCAmelCase , perm=(0, 3, 1, 2) )
lowercase_ = tf.transpose(UpperCAmelCase , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
lowercase_ = tuple([tf.transpose(UpperCAmelCase , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=UpperCAmelCase , pooler_output=UpperCAmelCase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = RegNetConfig
lowerCAmelCase__ = "regnet"
lowerCAmelCase__ = "pixel_values"
@property
def A__ ( self ) -> Dict:
'''simple docstring'''
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
SCREAMING_SNAKE_CASE__ = r"""
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
SCREAMING_SNAKE_CASE__ = r"""
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConveNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , snake_case_ , )
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , *UpperCAmelCase , **UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(UpperCAmelCase , *UpperCAmelCase , **UpperCAmelCase )
lowercase_ = TFRegNetMainLayer(UpperCAmelCase , name="regnet" )
@unpack_inputs
@add_start_docstrings_to_model_forward(UpperCAmelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCAmelCase , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
'''simple docstring'''
lowercase_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase_ = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ = self.regnet(
pixel_values=UpperCAmelCase , output_hidden_states=UpperCAmelCase , return_dict=UpperCAmelCase , training=UpperCAmelCase , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , snake_case_ , )
class __lowerCamelCase ( snake_case_ , snake_case_ ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , *UpperCAmelCase , **UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(UpperCAmelCase , *UpperCAmelCase , **UpperCAmelCase )
lowercase_ = config.num_labels
lowercase_ = TFRegNetMainLayer(UpperCAmelCase , name="regnet" )
# classification head
lowercase_ = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(UpperCAmelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def A__ ( self , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
'''simple docstring'''
lowercase_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase_ = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ = self.regnet(
UpperCAmelCase , output_hidden_states=UpperCAmelCase , return_dict=UpperCAmelCase , training=UpperCAmelCase )
lowercase_ = outputs.pooler_output if return_dict else outputs[1]
lowercase_ = self.classifier[0](UpperCAmelCase )
lowercase_ = self.classifier[1](UpperCAmelCase )
lowercase_ = None if labels is None else self.hf_compute_loss(labels=UpperCAmelCase , logits=UpperCAmelCase )
if not return_dict:
lowercase_ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=UpperCAmelCase , logits=UpperCAmelCase , hidden_states=outputs.hidden_states )
| 359 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE__ = {"""configuration_mra""": ["""MRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MraConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""MRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MraForMaskedLM""",
"""MraForMultipleChoice""",
"""MraForQuestionAnswering""",
"""MraForSequenceClassification""",
"""MraForTokenClassification""",
"""MraLayer""",
"""MraModel""",
"""MraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 297 | 0 |
import os
def _SCREAMING_SNAKE_CASE ( ):
A_ : Union[str, Any] = os.path.dirname(os.path.realpath(SCREAMING_SNAKE_CASE ) )
A_ : Dict = os.path.join(SCREAMING_SNAKE_CASE , '''triangle.txt''' )
with open(SCREAMING_SNAKE_CASE ) as f:
A_ : List[str] = f.readlines()
A_ : Optional[Any] = []
for line in triangle:
A_ : Tuple = []
for number in line.strip().split(''' ''' ):
numbers_from_line.append(int(SCREAMING_SNAKE_CASE ) )
a.append(SCREAMING_SNAKE_CASE )
for i in range(1 , len(SCREAMING_SNAKE_CASE ) ):
for j in range(len(a[i] ) ):
A_ : Any = a[i - 1][j] if j != len(a[i - 1] ) else 0
A_ : Optional[Any] = a[i - 1][j - 1] if j > 0 else 0
a[i][j] += max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return max(a[-1] )
if __name__ == "__main__":
print(solution())
| 186 |
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
UpperCamelCase = None
UpperCamelCase = """<""" if sys.byteorder == """little""" else """>"""
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
UpperCamelCase = [
np.dtype("""|b1"""),
np.dtype("""|u1"""),
np.dtype("""<u2"""),
np.dtype(""">u2"""),
np.dtype("""<i2"""),
np.dtype(""">i2"""),
np.dtype("""<u4"""),
np.dtype(""">u4"""),
np.dtype("""<i4"""),
np.dtype(""">i4"""),
np.dtype("""<f4"""),
np.dtype(""">f4"""),
np.dtype("""<f8"""),
np.dtype(""">f8"""),
]
@dataclass
class _lowerCamelCase :
"""simple docstring"""
snake_case = True
snake_case = None
# Automatically constructed
snake_case = "PIL.Image.Image"
snake_case = pa.struct({"bytes": pa.binary(), "path": pa.string()} )
snake_case = field(default="Image" , init=UpperCamelCase , repr=UpperCamelCase )
def __call__( self )->int:
'''simple docstring'''
return self.pa_type
def _snake_case ( self , _SCREAMING_SNAKE_CASE )->dict:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
A_ : Optional[int] = np.array(_SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
return {"path": value, "bytes": None}
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
return {"path": None, "bytes": value}
elif isinstance(_SCREAMING_SNAKE_CASE , np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(_SCREAMING_SNAKE_CASE )
elif isinstance(_SCREAMING_SNAKE_CASE , PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(_SCREAMING_SNAKE_CASE )
elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get('''path''' )}
elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )}
else:
raise ValueError(
F'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None )->"PIL.Image.Image":
'''simple docstring'''
if not self.decode:
raise RuntimeError('''Decoding is disabled for this feature. Please use Image(decode=True) instead.''' )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support decoding images, please install \'Pillow\'.''' )
if token_per_repo_id is None:
A_ : List[str] = {}
A_ , A_ : str = value['''path'''], value['''bytes''']
if bytes_ is None:
if path is None:
raise ValueError(F'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
else:
if is_local_path(_SCREAMING_SNAKE_CASE ):
A_ : List[str] = PIL.Image.open(_SCREAMING_SNAKE_CASE )
else:
A_ : List[str] = path.split('''::''' )[-1]
try:
A_ : int = string_to_dict(_SCREAMING_SNAKE_CASE , config.HUB_DATASETS_URL )['''repo_id''']
A_ : Optional[int] = token_per_repo_id.get(_SCREAMING_SNAKE_CASE )
except ValueError:
A_ : Any = None
with xopen(_SCREAMING_SNAKE_CASE , '''rb''' , use_auth_token=_SCREAMING_SNAKE_CASE ) as f:
A_ : Optional[Any] = BytesIO(f.read() )
A_ : Dict = PIL.Image.open(bytes_ )
else:
A_ : Optional[int] = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def _snake_case ( self )->Union["FeatureType", Dict[str, "FeatureType"]]:
'''simple docstring'''
from .features import Value
return (
self
if self.decode
else {
"bytes": Value('''binary''' ),
"path": Value('''string''' ),
}
)
def _snake_case ( self , _SCREAMING_SNAKE_CASE )->pa.StructArray:
'''simple docstring'''
if pa.types.is_string(storage.type ):
A_ : Dict = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) , type=pa.binary() )
A_ : List[Any] = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
A_ : Dict = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) , type=pa.string() )
A_ : List[str] = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('''bytes''' ) >= 0:
A_ : Tuple = storage.field('''bytes''' )
else:
A_ : Optional[int] = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) , type=pa.binary() )
if storage.type.get_field_index('''path''' ) >= 0:
A_ : Optional[Any] = storage.field('''path''' )
else:
A_ : Optional[int] = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) , type=pa.string() )
A_ : Union[str, Any] = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
A_ : Optional[Any] = pa.array(
[encode_np_array(np.array(_SCREAMING_SNAKE_CASE ) )['''bytes'''] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
A_ : str = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) , type=pa.string() )
A_ : List[Any] = pa.StructArray.from_arrays(
[bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() )
return array_cast(_SCREAMING_SNAKE_CASE , self.pa_type )
def _snake_case ( self , _SCREAMING_SNAKE_CASE )->pa.StructArray:
'''simple docstring'''
@no_op_if_value_is_null
def path_to_bytes(_SCREAMING_SNAKE_CASE ):
with xopen(_SCREAMING_SNAKE_CASE , '''rb''' ) as f:
A_ : Any = f.read()
return bytes_
A_ : Dict = pa.array(
[
(path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
A_ : List[Any] = pa.array(
[os.path.basename(_SCREAMING_SNAKE_CASE ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] , type=pa.string() , )
A_ : str = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() )
return array_cast(_SCREAMING_SNAKE_CASE , self.pa_type )
def _SCREAMING_SNAKE_CASE ( ):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
A_ : Dict = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
A_ : Dict = BytesIO()
if image.format in list_image_compression_formats():
A_ : Tuple = image.format
else:
A_ : List[str] = '''PNG''' if image.mode in ['''1''', '''L''', '''LA''', '''RGB''', '''RGBA'''] else '''TIFF'''
image.save(SCREAMING_SNAKE_CASE , format=SCREAMING_SNAKE_CASE )
return buffer.getvalue()
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
if hasattr(SCREAMING_SNAKE_CASE , '''filename''' ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(SCREAMING_SNAKE_CASE )}
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
A_ : Union[str, Any] = array.dtype
A_ : Dict = dtype.byteorder if dtype.byteorder != '''=''' else _NATIVE_BYTEORDER
A_ : Any = dtype.kind
A_ : Any = dtype.itemsize
A_ : Dict = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
A_ : List[Any] = np.dtype('''|u1''' )
if dtype_kind not in ["u", "i"]:
raise TypeError(
f'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' )
if dtype is not dest_dtype:
warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
A_ : int = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
A_ : Any = dtype_byteorder + dtype_kind + str(SCREAMING_SNAKE_CASE )
A_ : Optional[int] = np.dtype(SCREAMING_SNAKE_CASE )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
f'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' )
A_ : Tuple = PIL.Image.fromarray(array.astype(SCREAMING_SNAKE_CASE ) )
return {"path": None, "bytes": image_to_bytes(SCREAMING_SNAKE_CASE )}
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
if objs:
A_ , A_ : Union[str, Any] = first_non_null_value(SCREAMING_SNAKE_CASE )
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(SCREAMING_SNAKE_CASE , np.ndarray ):
A_ : Tuple = no_op_if_value_is_null(SCREAMING_SNAKE_CASE )
return [obj_to_image_dict_func(SCREAMING_SNAKE_CASE ) for obj in objs]
elif isinstance(SCREAMING_SNAKE_CASE , PIL.Image.Image ):
A_ : List[str] = no_op_if_value_is_null(SCREAMING_SNAKE_CASE )
return [obj_to_image_dict_func(SCREAMING_SNAKE_CASE ) for obj in objs]
else:
return objs
else:
return objs
| 186 | 1 |
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class A__ :
'''simple docstring'''
def __init__( self: Dict , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Dict=None , _SCREAMING_SNAKE_CASE: List[str]=None , _SCREAMING_SNAKE_CASE: Union[str, Any]=None , _SCREAMING_SNAKE_CASE: str="resnet50" , _SCREAMING_SNAKE_CASE: Tuple=3 , _SCREAMING_SNAKE_CASE: Tuple=32 , _SCREAMING_SNAKE_CASE: Union[str, Any]=3 , _SCREAMING_SNAKE_CASE: Tuple=True , _SCREAMING_SNAKE_CASE: Optional[Any]=True , ) -> int:
"""simple docstring"""
__lowerCAmelCase : Any = parent
__lowerCAmelCase : List[str] = out_indices if out_indices is not None else [4]
__lowerCAmelCase : Tuple = stage_names
__lowerCAmelCase : Union[str, Any] = out_features
__lowerCAmelCase : Optional[int] = backbone
__lowerCAmelCase : Optional[int] = batch_size
__lowerCAmelCase : List[str] = image_size
__lowerCAmelCase : List[str] = num_channels
__lowerCAmelCase : Any = use_pretrained_backbone
__lowerCAmelCase : Tuple = is_training
def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> str:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
__lowerCAmelCase : str = self.get_config()
return config, pixel_values
def _SCREAMING_SNAKE_CASE ( self: int) -> Dict:
"""simple docstring"""
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def _SCREAMING_SNAKE_CASE ( self: List[Any] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: List[Any]) -> str:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = TimmBackbone(config=_SCREAMING_SNAKE_CASE)
model.to(_SCREAMING_SNAKE_CASE)
model.eval()
with torch.no_grad():
__lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE)
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : List[Any] = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = config_and_inputs
__lowerCAmelCase : int = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = (TimmBackbone,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE = {'feature-extraction': TimmBackbone} if is_torch_available() else {}
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def _SCREAMING_SNAKE_CASE ( self: List[str]) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = TimmBackboneModelTester(self)
__lowerCAmelCase : Any = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> Any:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = "resnet18"
__lowerCAmelCase : List[Any] = "microsoft/resnet-18"
__lowerCAmelCase : Tuple = AutoBackbone.from_pretrained(_SCREAMING_SNAKE_CASE , use_timm_backbone=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = AutoBackbone.from_pretrained(_SCREAMING_SNAKE_CASE)
self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features))
self.assertEqual(len(timm_model.stage_names) , len(transformers_model.stage_names))
self.assertEqual(timm_model.channels , transformers_model.channels)
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,))
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names) - 1])
__lowerCAmelCase : Tuple = AutoBackbone.from_pretrained(_SCREAMING_SNAKE_CASE , use_timm_backbone=_SCREAMING_SNAKE_CASE , out_indices=[1, 2, 3])
__lowerCAmelCase : str = AutoBackbone.from_pretrained(_SCREAMING_SNAKE_CASE , out_indices=[1, 2, 3])
self.assertEqual(timm_model.out_indices , transformers_model.out_indices)
self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features))
self.assertEqual(timm_model.channels , transformers_model.channels)
@unittest.skip("TimmBackbone doesn't support feed forward chunking")
def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> Dict:
"""simple docstring"""
pass
@unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
def _SCREAMING_SNAKE_CASE ( self: List[str]) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip("TimmBackbone initialization is managed on the timm side")
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> int:
"""simple docstring"""
pass
@unittest.skip("TimmBackbone models doesn't have inputs_embeds")
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip("TimmBackbone models doesn't have inputs_embeds")
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
def _SCREAMING_SNAKE_CASE ( self: Any) -> str:
"""simple docstring"""
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
def _SCREAMING_SNAKE_CASE ( self: Dict) -> Tuple:
"""simple docstring"""
pass
@unittest.skip("model weights aren't tied in TimmBackbone.")
def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> List[str]:
"""simple docstring"""
pass
@unittest.skip("model weights aren't tied in TimmBackbone.")
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> Any:
"""simple docstring"""
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
def _SCREAMING_SNAKE_CASE ( self: str) -> Any:
"""simple docstring"""
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
def _SCREAMING_SNAKE_CASE ( self: str) -> List[str]:
"""simple docstring"""
pass
@unittest.skip("TimmBackbone doesn't support output_attentions.")
def _SCREAMING_SNAKE_CASE ( self: List[str]) -> Dict:
"""simple docstring"""
pass
@unittest.skip("Safetensors is not supported by timm.")
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> List[str]:
"""simple docstring"""
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
def _SCREAMING_SNAKE_CASE ( self: List[str]) -> Dict:
"""simple docstring"""
pass
def _SCREAMING_SNAKE_CASE ( self: str) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase : Optional[Any] = model_class(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase : Optional[Any] = [*signature.parameters.keys()]
__lowerCAmelCase : Union[str, Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Optional[Any]) -> int:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase : Tuple = True
__lowerCAmelCase : Union[str, Any] = self.has_attentions
# no need to test all models as different heads yield the same functionality
__lowerCAmelCase : Tuple = self.all_model_classes[0]
__lowerCAmelCase : Any = model_class(_SCREAMING_SNAKE_CASE)
model.to(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = model(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Tuple = outputs[0][-1]
# Encoder-/Decoder-only models
__lowerCAmelCase : Any = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
__lowerCAmelCase : str = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=_SCREAMING_SNAKE_CASE)
self.assertIsNotNone(hidden_states.grad)
if self.has_attentions:
self.assertIsNotNone(attentions.grad)
def _SCREAMING_SNAKE_CASE ( self: int) -> Tuple:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase : str = model_class(_SCREAMING_SNAKE_CASE)
model.to(_SCREAMING_SNAKE_CASE)
model.eval()
__lowerCAmelCase : List[Any] = model(**_SCREAMING_SNAKE_CASE)
self.assertEqual(len(result.feature_maps) , len(config.out_indices))
self.assertEqual(len(model.channels) , len(config.out_indices))
# Check output of last stage is taken if out_features=None, out_indices=None
__lowerCAmelCase : Union[str, Any] = copy.deepcopy(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = None
__lowerCAmelCase : Dict = model_class(_SCREAMING_SNAKE_CASE)
model.to(_SCREAMING_SNAKE_CASE)
model.eval()
__lowerCAmelCase : Union[str, Any] = model(**_SCREAMING_SNAKE_CASE)
self.assertEqual(len(result.feature_maps) , 1)
self.assertEqual(len(model.channels) , 1)
# Check backbone can be initialized with fresh weights
__lowerCAmelCase : List[str] = copy.deepcopy(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : int = False
__lowerCAmelCase : List[Any] = model_class(_SCREAMING_SNAKE_CASE)
model.to(_SCREAMING_SNAKE_CASE)
model.eval()
__lowerCAmelCase : Optional[Any] = model(**_SCREAMING_SNAKE_CASE) | 58 |
"""simple docstring"""
from __future__ import annotations
from math import gcd
def _lowercase ( __snake_case ,__snake_case = 2 ,__snake_case = 1 ,__snake_case = 3 ,) -> int | None:
# A value less than 2 can cause an infinite loop in the algorithm.
if num < 2:
raise ValueError("The input value cannot be less than 2" )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
def rand_fn(__snake_case ,__snake_case ,__snake_case ) -> int:
return (pow(__snake_case ,2 ) + step) % modulus
for _ in range(__snake_case ):
# These track the position within the cycle detection logic.
__lowerCAmelCase : Union[str, Any] = seed
__lowerCAmelCase : Optional[Any] = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
__lowerCAmelCase : Tuple = rand_fn(__snake_case ,__snake_case ,__snake_case )
__lowerCAmelCase : Any = rand_fn(__snake_case ,__snake_case ,__snake_case )
__lowerCAmelCase : List[Any] = rand_fn(__snake_case ,__snake_case ,__snake_case )
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
__lowerCAmelCase : List[Any] = gcd(hare - tortoise ,__snake_case )
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
__lowerCAmelCase : str = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
__snake_case : Any = argparse.ArgumentParser()
parser.add_argument(
'num',
type=int,
help='The value to find a divisor of',
)
parser.add_argument(
'--attempts',
type=int,
default=3,
help='The number of attempts before giving up',
)
__snake_case : List[str] = parser.parse_args()
__snake_case : List[Any] = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F"""{args.num} is probably prime""")
else:
__snake_case : Any = args.num // divisor
print(F"""{args.num} = {divisor} * {quotient}""") | 58 | 1 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=_a )
class SCREAMING_SNAKE_CASE__ ( _a ):
_a = field(default='audio-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
_a = Features({'audio': Audio()} )
_a = Features({'labels': ClassLabel} )
_a = "audio"
_a = "labels"
def __lowercase ( self : List[str] , lowerCAmelCase : Optional[int] ):
if self.label_column not in features:
raise ValueError(f'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , lowerCAmelCase ):
raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
lowerCAmelCase = copy.deepcopy(self )
lowerCAmelCase = self.label_schema.copy()
lowerCAmelCase = features[self.label_column]
lowerCAmelCase = label_schema
return task_template
@property
def __lowercase ( self : List[str] ):
return {
self.audio_column: "audio",
self.label_column: "labels",
}
| 155 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
a = 1_6
a = 3_2
def lowercase (snake_case__ : Accelerator , snake_case__ : int = 16 ) -> List[str]:
'''simple docstring'''
lowerCAmelCase = AutoTokenizer.from_pretrained("""bert-base-cased""" )
lowerCAmelCase = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(snake_case__ : int ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=snake_case__ , max_length=snake_case__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCAmelCase = datasets.map(
snake_case__ , batched=snake_case__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCAmelCase = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(snake_case__ : Union[str, Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCAmelCase = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCAmelCase = 16
elif accelerator.mixed_precision != "no":
lowerCAmelCase = 8
else:
lowerCAmelCase = None
return tokenizer.pad(
snake_case__ , padding="""longest""" , max_length=snake_case__ , pad_to_multiple_of=snake_case__ , return_tensors="""pt""" , )
# Instantiate dataloaders.
lowerCAmelCase = DataLoader(
tokenized_datasets["""train"""] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ )
lowerCAmelCase = DataLoader(
tokenized_datasets["""validation"""] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
a = mocked_dataloaders # noqa: F811
def lowercase (snake_case__ : Any , snake_case__ : List[str] ) -> str:
'''simple docstring'''
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , snake_case__ ) == "1":
lowerCAmelCase = 2
# New Code #
lowerCAmelCase = int(args.gradient_accumulation_steps )
lowerCAmelCase = int(args.local_sgd_steps )
# Initialize accelerator
lowerCAmelCase = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=snake_case__ )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError("""LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)""" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase = config["""lr"""]
lowerCAmelCase = int(config["""num_epochs"""] )
lowerCAmelCase = int(config["""seed"""] )
lowerCAmelCase = int(config["""batch_size"""] )
lowerCAmelCase = evaluate.load("""glue""" , """mrpc""" )
set_seed(snake_case__ )
lowerCAmelCase , lowerCAmelCase = get_dataloaders(snake_case__ , snake_case__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=snake_case__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCAmelCase = model.to(accelerator.device )
# Instantiate optimizer
lowerCAmelCase = AdamW(params=model.parameters() , lr=snake_case__ )
# Instantiate scheduler
lowerCAmelCase = get_linear_schedule_with_warmup(
optimizer=snake_case__ , num_warmup_steps=100 , num_training_steps=(len(snake_case__ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = accelerator.prepare(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Now we train the model
for epoch in range(snake_case__ ):
model.train()
with LocalSGD(
accelerator=snake_case__ , model=snake_case__ , local_sgd_steps=snake_case__ , enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(snake_case__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(snake_case__ ):
lowerCAmelCase = model(**snake_case__ )
lowerCAmelCase = output.loss
accelerator.backward(snake_case__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(snake_case__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCAmelCase = model(**snake_case__ )
lowerCAmelCase = outputs.logits.argmax(dim=-1 )
lowerCAmelCase , lowerCAmelCase = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=snake_case__ , references=snake_case__ , )
lowerCAmelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , snake_case__ )
def lowercase () -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=snake_case__ , default=snake_case__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
# New Code #
parser.add_argument(
"""--gradient_accumulation_steps""" , type=snake_case__ , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , )
parser.add_argument(
"""--local_sgd_steps""" , type=snake_case__ , default=8 , help="""Number of local SGD steps or None to disable local SGD""" )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
lowerCAmelCase = parser.parse_args()
lowerCAmelCase = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(snake_case__ , snake_case__ )
if __name__ == "__main__":
main()
| 155 | 1 |
"""simple docstring"""
import heapq as hq
import math
from collections.abc import Iterator
class UpperCamelCase_ :
"""simple docstring"""
def __init__( self : Optional[Any] , UpperCAmelCase__ : str ) -> Dict:
__SCREAMING_SNAKE_CASE = str(id_ )
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = {} # {vertex:distance}
def __lt__( self : Dict , UpperCAmelCase__ : List[Any] ) -> Optional[Any]:
return self.key < other.key
def __repr__( self : int ) -> Tuple:
return self.id
def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : List[str] ) -> Tuple:
self.neighbors.append(UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple ) -> str:
__SCREAMING_SNAKE_CASE = weight
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , lowerCAmelCase_ )
graph[b - 1].add_edge(graph[a - 1] , lowerCAmelCase_ )
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = []
for u in graph:
__SCREAMING_SNAKE_CASE = math.inf
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = graph[:]
while q:
__SCREAMING_SNAKE_CASE = min(lowerCAmelCase_ )
q.remove(lowerCAmelCase_ )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
__SCREAMING_SNAKE_CASE = u
__SCREAMING_SNAKE_CASE = u.edges[v.id]
for i in range(1 , len(lowerCAmelCase_ ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
for u in graph:
__SCREAMING_SNAKE_CASE = math.inf
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = list(lowerCAmelCase_ )
hq.heapify(lowerCAmelCase_ )
while h:
__SCREAMING_SNAKE_CASE = hq.heappop(lowerCAmelCase_ )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
__SCREAMING_SNAKE_CASE = u
__SCREAMING_SNAKE_CASE = u.edges[v.id]
hq.heapify(lowerCAmelCase_ )
for i in range(1 , len(lowerCAmelCase_ ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def UpperCAmelCase__ ():
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 195 |
"""simple docstring"""
from datetime import datetime
import requests
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
__SCREAMING_SNAKE_CASE = requests.get(base_url + url ).json()[0]["urls"][0]["src"]
return requests.get(lowerCAmelCase_ ).content
if __name__ == "__main__":
a__ : str = input('''Enter Video/IGTV url: ''').strip()
a__ : List[Any] = F"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
with open(file_name, '''wb''') as fp:
fp.write(download_video(url))
print(F"Done. Video saved to disk as {file_name}.")
| 195 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class SCREAMING_SNAKE_CASE__ :
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Tuple=1_3 , SCREAMING_SNAKE_CASE__ : str=7 , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=9_9 , SCREAMING_SNAKE_CASE__ : Optional[Any]=3_2 , SCREAMING_SNAKE_CASE__ : List[str]=5 , SCREAMING_SNAKE_CASE__ : List[Any]=4 , SCREAMING_SNAKE_CASE__ : Tuple=3_7 , SCREAMING_SNAKE_CASE__ : Any="gelu" , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : int=5_1_2 , SCREAMING_SNAKE_CASE__ : int=1_6 , SCREAMING_SNAKE_CASE__ : Optional[int]=2 , SCREAMING_SNAKE_CASE__ : Any=0.02 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=3 , SCREAMING_SNAKE_CASE__ : Optional[Any]=4 , SCREAMING_SNAKE_CASE__ : Optional[int]=None , ) -> Any:
a_ : Tuple = parent
a_ : int = batch_size
a_ : Tuple = seq_length
a_ : List[Any] = is_training
a_ : List[str] = use_token_type_ids
a_ : Dict = use_labels
a_ : Any = vocab_size
a_ : List[str] = hidden_size
a_ : Tuple = num_hidden_layers
a_ : List[Any] = num_attention_heads
a_ : Dict = intermediate_size
a_ : Any = hidden_act
a_ : List[str] = hidden_dropout_prob
a_ : Tuple = attention_probs_dropout_prob
a_ : Optional[Any] = max_position_embeddings
a_ : List[Any] = type_vocab_size
a_ : int = type_sequence_label_size
a_ : List[Any] = initializer_range
a_ : List[str] = num_labels
a_ : Union[str, Any] = num_choices
a_ : str = scope
a_ : Tuple = self.vocab_size - 1
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
a_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a_ : Any = None
if self.use_token_type_ids:
a_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a_ : List[Any] = None
a_ : Union[str, Any] = None
a_ : List[Any] = None
if self.use_labels:
a_ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a_ : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
a_ : Union[str, Any] = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
a_ : List[str] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def SCREAMING_SNAKE_CASE ( self : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , *SCREAMING_SNAKE_CASE__ : Tuple ) -> Union[str, Any]:
a_ : Dict = OpenAIGPTModel(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
a_ : str = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , head_mask=SCREAMING_SNAKE_CASE__ )
a_ : Dict = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ )
a_ : Dict = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Any:
a_ : str = OpenAIGPTLMHeadModel(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
a_ : Optional[int] = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] , *SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Dict:
a_ : int = OpenAIGPTDoubleHeadsModel(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
a_ : str = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : str ) -> List[str]:
a_ : Any = self.num_labels
a_ : Dict = OpenAIGPTForSequenceClassification(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
a_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a_ : Any = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
a_ : Optional[Any] = self.prepare_config_and_inputs()
(
(
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) ,
) : Optional[Any] = config_and_inputs
a_ : Optional[int] = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
snake_case__ : Tuple = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
snake_case__ : List[str] = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
snake_case__ : Dict = (
{
'''feature-extraction''': OpenAIGPTModel,
'''text-classification''': OpenAIGPTForSequenceClassification,
'''text-generation''': OpenAIGPTLMHeadModel,
'''zero-shot''': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def SCREAMING_SNAKE_CASE ( self : List[str] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str] ) -> Dict:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def SCREAMING_SNAKE_CASE ( self : int , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Any=False ) -> List[str]:
a_ : str = super()._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
a_ : Optional[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ , )
a_ : str = inputs_dict['labels']
a_ : Optional[int] = inputs_dict['labels']
a_ : Optional[int] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ , )
a_ : Union[str, Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ )
return inputs_dict
def SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
a_ : str = OpenAIGPTModelTester(self )
a_ : int = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , n_embd=3_7 )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
a_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
a_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]:
a_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
a_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*SCREAMING_SNAKE_CASE__ )
@slow
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ : str = OpenAIGPTModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
a_ : Dict = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' )
model.to(SCREAMING_SNAKE_CASE__ )
a_ : List[Any] = torch.tensor([[4_8_1, 4_7_3_5, 5_4_4]] , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ ) # the president is
a_ : Tuple = [
4_8_1,
4_7_3_5,
5_4_4,
2_4_6,
9_6_3,
8_7_0,
7_6_2,
2_3_9,
2_4_4,
4_0_4_7_7,
2_4_4,
2_4_9,
7_1_9,
8_8_1,
4_8_7,
5_4_4,
2_4_0,
2_4_4,
6_0_3,
4_8_1,
] # the president is a very good man. " \n " i\'m sure he is, " said the
a_ : Dict = model.generate(SCREAMING_SNAKE_CASE__ , do_sample=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(output_ids[0].tolist() , SCREAMING_SNAKE_CASE__ )
| 32 | import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
_UpperCAmelCase = {
"""sample_size""": 32,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": 1000,
"""block_out_channels""": [32, 64],
"""attention_head_dim""": 8,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
_UpperCAmelCase = {
"""sample_size""": 64,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 3,
"""num_class_embeds""": 1000,
"""block_out_channels""": [192, 192 * 2, 192 * 3, 192 * 4],
"""attention_head_dim""": 64,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
_UpperCAmelCase = {
"""sample_size""": 256,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": None,
"""block_out_channels""": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"""attention_head_dim""": 64,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """default""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
_UpperCAmelCase = {
"""num_train_timesteps""": 40,
"""sigma_min""": 0.002,
"""sigma_max""": 80.0,
}
_UpperCAmelCase = {
"""num_train_timesteps""": 201,
"""sigma_min""": 0.002,
"""sigma_max""": 80.0,
}
_UpperCAmelCase = {
"""num_train_timesteps""": 151,
"""sigma_min""": 0.002,
"""sigma_max""": 80.0,
}
def UpperCamelCase ( __lowercase : int ):
'''simple docstring'''
if isinstance(__lowercase ,__lowercase ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError('boolean value expected' )
def UpperCamelCase ( __lowercase : Any ,__lowercase : Tuple ,__lowercase : Optional[int] ,__lowercase : Tuple ,__lowercase : Dict=False ):
'''simple docstring'''
A_ : str = checkpoint[f'''{old_prefix}.in_layers.0.weight''']
A_ : int = checkpoint[f'''{old_prefix}.in_layers.0.bias''']
A_ : int = checkpoint[f'''{old_prefix}.in_layers.2.weight''']
A_ : List[Any] = checkpoint[f'''{old_prefix}.in_layers.2.bias''']
A_ : Optional[int] = checkpoint[f'''{old_prefix}.emb_layers.1.weight''']
A_ : Tuple = checkpoint[f'''{old_prefix}.emb_layers.1.bias''']
A_ : List[Any] = checkpoint[f'''{old_prefix}.out_layers.0.weight''']
A_ : int = checkpoint[f'''{old_prefix}.out_layers.0.bias''']
A_ : Optional[Any] = checkpoint[f'''{old_prefix}.out_layers.3.weight''']
A_ : List[Any] = checkpoint[f'''{old_prefix}.out_layers.3.bias''']
if has_skip:
A_ : Any = checkpoint[f'''{old_prefix}.skip_connection.weight''']
A_ : List[Any] = checkpoint[f'''{old_prefix}.skip_connection.bias''']
return new_checkpoint
def UpperCamelCase ( __lowercase : Tuple ,__lowercase : List[str] ,__lowercase : int ,__lowercase : Optional[Any] ,__lowercase : Union[str, Any]=None ):
'''simple docstring'''
A_ , A_ , A_ : Tuple = checkpoint[f'''{old_prefix}.qkv.weight'''].chunk(3 ,dim=0 )
A_ , A_ , A_ : List[str] = checkpoint[f'''{old_prefix}.qkv.bias'''].chunk(3 ,dim=0 )
A_ : Any = checkpoint[f'''{old_prefix}.norm.weight''']
A_ : str = checkpoint[f'''{old_prefix}.norm.bias''']
A_ : int = weight_q.squeeze(-1 ).squeeze(-1 )
A_ : int = bias_q.squeeze(-1 ).squeeze(-1 )
A_ : List[str] = weight_k.squeeze(-1 ).squeeze(-1 )
A_ : Any = bias_k.squeeze(-1 ).squeeze(-1 )
A_ : List[Any] = weight_v.squeeze(-1 ).squeeze(-1 )
A_ : Dict = bias_v.squeeze(-1 ).squeeze(-1 )
A_ : Any = (
checkpoint[f'''{old_prefix}.proj_out.weight'''].squeeze(-1 ).squeeze(-1 )
)
A_ : Dict = checkpoint[f'''{old_prefix}.proj_out.bias'''].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
def UpperCamelCase ( __lowercase : str ,__lowercase : Union[str, Any] ):
'''simple docstring'''
A_ : Union[str, Any] = torch.load(__lowercase ,map_location='cpu' )
A_ : Dict = {}
A_ : Dict = checkpoint['time_embed.0.weight']
A_ : Any = checkpoint['time_embed.0.bias']
A_ : Union[str, Any] = checkpoint['time_embed.2.weight']
A_ : Tuple = checkpoint['time_embed.2.bias']
if unet_config["num_class_embeds"] is not None:
A_ : Dict = checkpoint['label_emb.weight']
A_ : Tuple = checkpoint['input_blocks.0.0.weight']
A_ : Tuple = checkpoint['input_blocks.0.0.bias']
A_ : str = unet_config['down_block_types']
A_ : List[str] = unet_config['layers_per_block']
A_ : Any = unet_config['attention_head_dim']
A_ : int = unet_config['block_out_channels']
A_ : Union[str, Any] = 1
A_ : List[str] = channels_list[0]
for i, layer_type in enumerate(__lowercase ):
A_ : List[Any] = channels_list[i]
A_ : int = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(__lowercase ):
A_ : Any = f'''down_blocks.{i}.resnets.{j}'''
A_ : str = f'''input_blocks.{current_layer}.0'''
A_ : List[Any] = True if j == 0 and downsample_block_has_skip else False
A_ : Any = convert_resnet(__lowercase ,__lowercase ,__lowercase ,__lowercase ,has_skip=__lowercase )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(__lowercase ):
A_ : Dict = f'''down_blocks.{i}.resnets.{j}'''
A_ : Optional[int] = f'''input_blocks.{current_layer}.0'''
A_ : str = True if j == 0 and downsample_block_has_skip else False
A_ : int = convert_resnet(__lowercase ,__lowercase ,__lowercase ,__lowercase ,has_skip=__lowercase )
A_ : Optional[Any] = f'''down_blocks.{i}.attentions.{j}'''
A_ : Union[str, Any] = f'''input_blocks.{current_layer}.1'''
A_ : Tuple = convert_attention(
__lowercase ,__lowercase ,__lowercase ,__lowercase ,__lowercase )
current_layer += 1
if i != len(__lowercase ) - 1:
A_ : List[Any] = f'''down_blocks.{i}.downsamplers.0'''
A_ : Dict = f'''input_blocks.{current_layer}.0'''
A_ : Tuple = convert_resnet(__lowercase ,__lowercase ,__lowercase ,__lowercase )
current_layer += 1
A_ : Tuple = current_channels
# hardcoded the mid-block for now
A_ : int = 'mid_block.resnets.0'
A_ : Dict = 'middle_block.0'
A_ : int = convert_resnet(__lowercase ,__lowercase ,__lowercase ,__lowercase )
A_ : Tuple = 'mid_block.attentions.0'
A_ : Any = 'middle_block.1'
A_ : Tuple = convert_attention(__lowercase ,__lowercase ,__lowercase ,__lowercase ,__lowercase )
A_ : Union[str, Any] = 'mid_block.resnets.1'
A_ : Any = 'middle_block.2'
A_ : int = convert_resnet(__lowercase ,__lowercase ,__lowercase ,__lowercase )
A_ : Tuple = 0
A_ : Optional[Any] = unet_config['up_block_types']
for i, layer_type in enumerate(__lowercase ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
A_ : Dict = f'''up_blocks.{i}.resnets.{j}'''
A_ : Dict = f'''output_blocks.{current_layer}.0'''
A_ : List[str] = convert_resnet(__lowercase ,__lowercase ,__lowercase ,__lowercase ,has_skip=__lowercase )
current_layer += 1
if i != len(__lowercase ) - 1:
A_ : Union[str, Any] = f'''up_blocks.{i}.upsamplers.0'''
A_ : List[str] = f'''output_blocks.{current_layer-1}.1'''
A_ : Optional[Any] = convert_resnet(__lowercase ,__lowercase ,__lowercase ,__lowercase )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
A_ : int = f'''up_blocks.{i}.resnets.{j}'''
A_ : Union[str, Any] = f'''output_blocks.{current_layer}.0'''
A_ : Optional[int] = convert_resnet(__lowercase ,__lowercase ,__lowercase ,__lowercase ,has_skip=__lowercase )
A_ : Optional[Any] = f'''up_blocks.{i}.attentions.{j}'''
A_ : Any = f'''output_blocks.{current_layer}.1'''
A_ : List[str] = convert_attention(
__lowercase ,__lowercase ,__lowercase ,__lowercase ,__lowercase )
current_layer += 1
if i != len(__lowercase ) - 1:
A_ : Dict = f'''up_blocks.{i}.upsamplers.0'''
A_ : Any = f'''output_blocks.{current_layer-1}.2'''
A_ : List[str] = convert_resnet(__lowercase ,__lowercase ,__lowercase ,__lowercase )
A_ : Any = checkpoint['out.0.weight']
A_ : Dict = checkpoint['out.0.bias']
A_ : int = checkpoint['out.2.weight']
A_ : List[str] = checkpoint['out.2.bias']
return new_checkpoint
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--unet_path""", default=None, type=str, required=True, help="""Path to the unet.pt to convert.""")
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output the converted UNet model."""
)
parser.add_argument("""--class_cond""", default=True, type=str, help="""Whether the model is class-conditional.""")
_UpperCAmelCase = parser.parse_args()
_UpperCAmelCase = strabool(args.class_cond)
_UpperCAmelCase = os.path.basename(args.unet_path)
print(F"""Checkpoint: {ckpt_name}""")
# Get U-Net config
if "imagenet64" in ckpt_name:
_UpperCAmelCase = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
_UpperCAmelCase = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
_UpperCAmelCase = TEST_UNET_CONFIG
else:
raise ValueError(F"""Checkpoint type {ckpt_name} is not currently supported.""")
if not args.class_cond:
_UpperCAmelCase = None
_UpperCAmelCase = con_pt_to_diffuser(args.unet_path, unet_config)
_UpperCAmelCase = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
_UpperCAmelCase = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
_UpperCAmelCase = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
_UpperCAmelCase = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F"""Checkpoint type {ckpt_name} is not currently supported.""")
_UpperCAmelCase = CMStochasticIterativeScheduler(**scheduler_config)
_UpperCAmelCase = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
| 140 | 0 |
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
_snake_case = logging.getLogger(__name__)
@dataclass
class lowerCAmelCase :
__lowerCamelCase = 42
__lowerCamelCase = 42
__lowerCamelCase = 42
@dataclass
class lowerCAmelCase :
__lowerCamelCase = 42
__lowerCamelCase = 42
__lowerCamelCase = None
__lowerCamelCase = None
class lowerCAmelCase ( lowercase_ ):
__lowerCamelCase = "train"
__lowerCamelCase = "dev"
__lowerCamelCase = "test"
class lowerCAmelCase :
@staticmethod
def UpperCAmelCase ( _lowercase :Tuple , _lowercase :Union[Split, str] ):
'''simple docstring'''
raise NotImplementedError
@staticmethod
def UpperCAmelCase ( _lowercase :str ):
'''simple docstring'''
raise NotImplementedError
@staticmethod
def UpperCAmelCase ( _lowercase :List[InputExample] , _lowercase :List[str] , _lowercase :int , _lowercase :PreTrainedTokenizer , _lowercase :Optional[int]=False , _lowercase :List[str]="[CLS]" , _lowercase :Optional[int]=1 , _lowercase :Union[str, Any]="[SEP]" , _lowercase :str=False , _lowercase :Any=False , _lowercase :List[str]=0 , _lowercase :int=0 , _lowercase :List[str]=-1_00 , _lowercase :List[str]=0 , _lowercase :Any=True , ):
'''simple docstring'''
lowercase__ = {label: i for i, label in enumerate(_lowercase )}
lowercase__ = []
for ex_index, example in enumerate(_lowercase ):
if ex_index % 1_00_00 == 0:
logger.info("Writing example %d of %d" , _lowercase , len(_lowercase ) )
lowercase__ = []
lowercase__ = []
for word, label in zip(example.words , example.labels ):
lowercase__ = tokenizer.tokenize(_lowercase )
# bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
if len(_lowercase ) > 0:
tokens.extend(_lowercase )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(_lowercase ) - 1) )
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
lowercase__ = tokenizer.num_special_tokens_to_add()
if len(_lowercase ) > max_seq_length - special_tokens_count:
lowercase__ = tokens[: (max_seq_length - special_tokens_count)]
lowercase__ = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
lowercase__ = [sequence_a_segment_id] * len(_lowercase )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
lowercase__ = [cls_token] + tokens
lowercase__ = [pad_token_label_id] + label_ids
lowercase__ = [cls_token_segment_id] + segment_ids
lowercase__ = tokenizer.convert_tokens_to_ids(_lowercase )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
lowercase__ = [1 if mask_padding_with_zero else 0] * len(_lowercase )
# Zero-pad up to the sequence length.
lowercase__ = max_seq_length - len(_lowercase )
if pad_on_left:
lowercase__ = ([pad_token] * padding_length) + input_ids
lowercase__ = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
lowercase__ = ([pad_token_segment_id] * padding_length) + segment_ids
lowercase__ = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(_lowercase ) == max_seq_length
assert len(_lowercase ) == max_seq_length
assert len(_lowercase ) == max_seq_length
assert len(_lowercase ) == max_seq_length
if ex_index < 5:
logger.info("*** Example ***" )
logger.info("guid: %s" , example.guid )
logger.info("tokens: %s" , " ".join([str(_lowercase ) for x in tokens] ) )
logger.info("input_ids: %s" , " ".join([str(_lowercase ) for x in input_ids] ) )
logger.info("input_mask: %s" , " ".join([str(_lowercase ) for x in input_mask] ) )
logger.info("segment_ids: %s" , " ".join([str(_lowercase ) for x in segment_ids] ) )
logger.info("label_ids: %s" , " ".join([str(_lowercase ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
lowercase__ = None
features.append(
InputFeatures(
input_ids=_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , label_ids=_lowercase ) )
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class lowerCAmelCase ( lowercase_ ):
__lowerCamelCase = 42
__lowerCamelCase = nn.CrossEntropyLoss().ignore_index
def __init__( self :Tuple , _lowercase :TokenClassificationTask , _lowercase :str , _lowercase :PreTrainedTokenizer , _lowercase :List[str] , _lowercase :str , _lowercase :Optional[int] = None , _lowercase :Optional[Any]=False , _lowercase :Split = Split.train , ):
'''simple docstring'''
lowercase__ = os.path.join(
_lowercase , "cached_{}_{}_{}".format(mode.value , tokenizer.__class__.__name__ , str(_lowercase ) ) , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowercase__ = cached_features_file + ".lock"
with FileLock(_lowercase ):
if os.path.exists(_lowercase ) and not overwrite_cache:
logger.info(f'''Loading features from cached file {cached_features_file}''' )
lowercase__ = torch.load(_lowercase )
else:
logger.info(f'''Creating features from dataset file at {data_dir}''' )
lowercase__ = token_classification_task.read_examples_from_file(_lowercase , _lowercase )
# TODO clean up all this to leverage built-in features of tokenizers
lowercase__ = token_classification_task.convert_examples_to_features(
_lowercase , _lowercase , _lowercase , _lowercase , cls_token_at_end=bool(model_type in ["xlnet"] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["xlnet"] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_lowercase , pad_on_left=bool(tokenizer.padding_side == "left" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info(f'''Saving features into cached file {cached_features_file}''' )
torch.save(self.features , _lowercase )
def __len__( self :int ):
'''simple docstring'''
return len(self.features )
def __getitem__( self :Optional[Any] , _lowercase :List[Any] ):
'''simple docstring'''
return self.features[i]
if is_tf_available():
import tensorflow as tf
class lowerCAmelCase :
__lowerCamelCase = 42
__lowerCamelCase = -100
def __init__( self :Optional[int] , _lowercase :TokenClassificationTask , _lowercase :str , _lowercase :PreTrainedTokenizer , _lowercase :List[str] , _lowercase :str , _lowercase :Optional[int] = None , _lowercase :Any=False , _lowercase :Split = Split.train , ):
'''simple docstring'''
lowercase__ = token_classification_task.read_examples_from_file(_lowercase , _lowercase )
# TODO clean up all this to leverage built-in features of tokenizers
lowercase__ = token_classification_task.convert_examples_to_features(
_lowercase , _lowercase , _lowercase , _lowercase , cls_token_at_end=bool(model_type in ["xlnet"] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["xlnet"] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_lowercase , pad_on_left=bool(tokenizer.padding_side == "left" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
lowercase__ = tf.data.Dataset.from_generator(
_lowercase , ({"input_ids": tf.intaa, "attention_mask": tf.intaa}, tf.intaa) , (
{"input_ids": tf.TensorShape([None] ), "attention_mask": tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
lowercase__ = tf.data.Dataset.from_generator(
_lowercase , ({"input_ids": tf.intaa, "attention_mask": tf.intaa, "token_type_ids": tf.intaa}, tf.intaa) , (
{
"input_ids": tf.TensorShape([None] ),
"attention_mask": tf.TensorShape([None] ),
"token_type_ids": tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
def UpperCAmelCase ( self :List[Any] ):
'''simple docstring'''
lowercase__ = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self :Optional[Any] ):
'''simple docstring'''
return len(self.features )
def __getitem__( self :List[str] , _lowercase :Union[str, Any] ):
'''simple docstring'''
return self.features[i]
| 353 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def _A ( ):
lowercase__ = HfArgumentParser(__magic_name__ )
lowercase__ = parser.parse_args_into_dataclasses()[0]
lowercase__ = TensorFlowBenchmark(args=__magic_name__ )
try:
lowercase__ = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
lowercase__ = "Arg --no_{0} is no longer used, please use --no-{0} instead."
lowercase__ = " ".join(str(__magic_name__ ).split(" " )[:-1] )
lowercase__ = ""
lowercase__ = eval(str(__magic_name__ ).split(" " )[-1] )
lowercase__ = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(__magic_name__ )
if len(__magic_name__ ) > 0:
lowercase__ = full_error_msg + begin_error_msg + str(__magic_name__ )
raise ValueError(__magic_name__ )
benchmark.run()
if __name__ == "__main__":
main()
| 201 | 0 |
'''simple docstring'''
def snake_case_ (_a : int ):
if not isinstance(_a , _a ):
raise ValueError('''Input must be an integer''' )
if input_num <= 0:
raise ValueError('''Input must be positive''' )
return sum(
divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 34 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _a ( __a ):
__a : int = ["""image_processor""", """tokenizer"""]
__a : Union[str, Any] = """ChineseCLIPImageProcessor"""
__a : List[Any] = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self : Dict , lowercase : Union[str, Any]=None , lowercase : Dict=None , **lowercase : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , lowercase , )
UpperCAmelCase = kwargs.pop('''feature_extractor''' )
UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowercase , lowercase )
UpperCAmelCase = self.image_processor
def __call__( self : Tuple , lowercase : Optional[Any]=None , lowercase : Union[str, Any]=None , lowercase : int=None , **lowercase : Dict ):
'''simple docstring'''
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
UpperCAmelCase = self.tokenizer(lowercase , return_tensors=lowercase , **lowercase )
if images is not None:
UpperCAmelCase = self.image_processor(lowercase , return_tensors=lowercase , **lowercase )
if text is not None and images is not None:
UpperCAmelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowercase ) , tensor_type=lowercase )
def A ( self : int , *lowercase : Tuple , **lowercase : List[str] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowercase , **lowercase )
def A ( self : Optional[Any] , *lowercase : int , **lowercase : Optional[int] ):
'''simple docstring'''
return self.tokenizer.decode(*lowercase , **lowercase )
@property
def A ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase = self.tokenizer.model_input_names
UpperCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def A ( self : List[Any] ):
'''simple docstring'''
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , lowercase , )
return self.image_processor_class
| 34 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
def a ( lowerCamelCase__ ):
'''simple docstring'''
if num <= 0:
A_ : str = f'{num}: Invalid input, please enter a positive integer.'
raise ValueError(__snake_case )
A_ : Tuple = [True] * (num + 1)
A_ : int = []
A_ : int = 2
A_ : List[str] = int(math.sqrt(__snake_case ) )
while start <= end:
# If start is a prime
if sieve[start] is True:
prime.append(__snake_case )
# Set multiples of start be False
for i in range(start * start , num + 1 , __snake_case ):
if sieve[i] is True:
A_ : int = False
start += 1
for j in range(end + 1 , num + 1 ):
if sieve[j] is True:
prime.append(__snake_case )
return prime
if __name__ == "__main__":
print(prime_sieve(int(input('''Enter a positive integer: ''').strip()))) | 364 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
lowerCamelCase :str = logging.get_logger(__name__)
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Tuple = ['pixel_values']
def __init__(self , lowercase = True , lowercase = None , lowercase = PILImageResampling.BILINEAR , lowercase = True , lowercase = 1 / 255 , lowercase = True , lowercase = None , lowercase = True , **lowercase , ):
super().__init__(**lowercase )
A_ : Union[str, Any] = size if size is not None else {"""shortest_edge""": 224}
A_ : Union[str, Any] = get_size_dict(lowercase , default_to_square=lowercase )
A_ : Optional[Any] = crop_size if crop_size is not None else {"""height""": 256, """width""": 256}
A_ : Tuple = get_size_dict(lowercase , param_name="""crop_size""" )
A_ : List[Any] = do_resize
A_ : List[str] = size
A_ : Dict = resample
A_ : int = do_rescale
A_ : str = rescale_factor
A_ : Tuple = do_center_crop
A_ : Tuple = crop_size
A_ : List[str] = do_flip_channel_order
def _a (self , lowercase , lowercase , lowercase = PIL.Image.BILINEAR , lowercase = None , **lowercase , ):
A_ : Any = get_size_dict(lowercase , default_to_square=lowercase )
if "shortest_edge" not in size:
raise ValueError(F'The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}' )
A_ : Any = get_resize_output_image_size(lowercase , size=size["""shortest_edge"""] , default_to_square=lowercase )
return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase , lowercase = None , **lowercase , ):
A_ : Tuple = get_size_dict(lowercase )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(lowercase , size=(size["""height"""], size["""width"""]) , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase , lowercase = None , **lowercase , ):
return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase = None ):
return flip_channel_order(lowercase , data_format=lowercase )
def _a (self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ):
A_ : str = do_resize if do_resize is not None else self.do_resize
A_ : Optional[int] = resample if resample is not None else self.resample
A_ : str = do_rescale if do_rescale is not None else self.do_rescale
A_ : str = rescale_factor if rescale_factor is not None else self.rescale_factor
A_ : str = do_center_crop if do_center_crop is not None else self.do_center_crop
A_ : Dict = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
A_ : Union[str, Any] = size if size is not None else self.size
A_ : Dict = get_size_dict(lowercase , default_to_square=lowercase )
A_ : Any = crop_size if crop_size is not None else self.crop_size
A_ : Union[str, Any] = get_size_dict(lowercase , param_name="""crop_size""" )
A_ : Union[str, Any] = make_list_of_images(lowercase )
if not valid_images(lowercase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
# All transformations expect numpy arrays.
A_ : Optional[Any] = [to_numpy_array(lowercase ) for image in images]
if do_resize:
A_ : List[Any] = [self.resize(image=lowercase , size=lowercase , resample=lowercase ) for image in images]
if do_center_crop:
A_ : str = [self.center_crop(image=lowercase , size=lowercase ) for image in images]
if do_rescale:
A_ : Dict = [self.rescale(image=lowercase , scale=lowercase ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
A_ : List[str] = [self.flip_channel_order(image=lowercase ) for image in images]
A_ : List[str] = [to_channel_dimension_format(lowercase , lowercase ) for image in images]
A_ : str = {"""pixel_values""": images}
return BatchFeature(data=lowercase , tensor_type=lowercase )
def _a (self , lowercase , lowercase = None ):
A_ : Optional[Any] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowercase ) != len(lowercase ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(lowercase ):
A_ : Dict = target_sizes.numpy()
A_ : Union[str, Any] = []
for idx in range(len(lowercase ) ):
A_ : Union[str, Any] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=lowercase )
A_ : Any = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowercase )
else:
A_ : str = logits.argmax(dim=1 )
A_ : Optional[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation | 135 | 0 |
'''simple docstring'''
from __future__ import annotations
import time
import numpy as np
lowerCAmelCase__ = [8, 5, 9, 7]
lowerCAmelCase__ = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
lowerCAmelCase__ = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class lowercase_ :
"""simple docstring"""
def __init__( self : Optional[Any] ,lowercase__ : list[int] ,lowercase__ : list[list[int]] ,lowercase__ : list[list[int]] ,):
__lowercase = claim_vector
__lowercase = allocated_resources_table
__lowercase = maximum_claim_table
def SCREAMING_SNAKE_CASE ( self : Tuple ):
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def SCREAMING_SNAKE_CASE ( self : str ):
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(lowercase__ ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def SCREAMING_SNAKE_CASE ( self : Any ):
return {self.__need().index(lowercase__ ): i for i in self.__need()}
def SCREAMING_SNAKE_CASE ( self : List[str] ,**lowercase__ : List[Any] ):
__lowercase = self.__need()
__lowercase = self.__allocated_resources_table
__lowercase = self.__available_resources()
__lowercase = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print('''_''' * 5_0 + '''\n''' )
while need_list:
__lowercase = False
for each_need in need_list:
__lowercase = True
for index, need in enumerate(lowercase__ ):
if need > available_resources[index]:
__lowercase = False
break
if execution:
__lowercase = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
__lowercase = original_need_index
print(F"Process {process_number + 1} is executing." )
# remove the process run from stack
need_list.remove(lowercase__ )
# update available/freed resources stack
__lowercase = np.array(lowercase__ ) + np.array(
alloc_resources_table[process_number] )
print(
'''Updated available resource stack for processes: '''
+ ''' '''.join([str(lowercase__ ) for x in available_resources] ) )
break
if safe:
print('''The process is in a safe state.\n''' )
else:
print('''System in unsafe state. Aborting...\n''' )
break
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
print(''' ''' * 9 + '''Allocated Resource Table''' )
for item in self.__allocated_resources_table:
print(
F"P{self.__allocated_resources_table.index(lowercase__ ) + 1}"
+ ''' '''.join(F"{it:>8}" for it in item )
+ '''\n''' )
print(''' ''' * 9 + '''System Resource Table''' )
for item in self.__maximum_claim_table:
print(
F"P{self.__maximum_claim_table.index(lowercase__ ) + 1}"
+ ''' '''.join(F"{it:>8}" for it in item )
+ '''\n''' )
print(
'''Current Usage by Active Processes: '''
+ ''' '''.join(str(lowercase__ ) for x in self.__claim_vector ) )
print(
'''Initial Available Resources: '''
+ ''' '''.join(str(lowercase__ ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 104 |
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = 0
__lowerCamelCase = 0
__lowerCamelCase = {}
def lowercase_ ( self , lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
if vertex not in self.adjacency:
__lowerCamelCase = {}
self.num_vertices += 1
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> str:
'''simple docstring'''
self.add_vertex(lowerCamelCase__ )
self.add_vertex(lowerCamelCase__ )
if head == tail:
return
__lowerCamelCase = weight
__lowerCamelCase = weight
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = self.get_edges()
for edge in edges:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = edge
edges.remove((tail, head, weight) )
for i in range(len(lowerCamelCase__ ) ):
__lowerCamelCase = list(edges[i] )
edges.sort(key=lambda lowerCamelCase__ : e[2] )
for i in range(len(lowerCamelCase__ ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
__lowerCamelCase = edges[i][2] + 1
for edge in edges:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = edge
__lowerCamelCase = weight
__lowerCamelCase = weight
def __str__( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = ''
for tail in self.adjacency:
for head in self.adjacency[tail]:
__lowerCamelCase = self.adjacency[head][tail]
string += f"""{head} -> {tail} == {weight}\n"""
return string.rstrip('\n' )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
return self.adjacency.keys()
@staticmethod
def lowercase_ ( lowerCamelCase__=None , lowerCamelCase__=None ) -> str:
'''simple docstring'''
__lowerCamelCase = Graph()
if vertices is None:
__lowerCamelCase = []
if edges is None:
__lowerCamelCase = []
for vertex in vertices:
g.add_vertex(lowerCamelCase__ )
for edge in edges:
g.add_edge(*lowerCamelCase__ )
return g
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = {}
__lowerCamelCase = {}
def __len__( self ) -> Tuple:
'''simple docstring'''
return len(self.parent )
def lowercase_ ( self , lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
if item in self.parent:
return self.find(lowerCamelCase__ )
__lowerCamelCase = item
__lowerCamelCase = 0
return item
def lowercase_ ( self , lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
if item not in self.parent:
return self.make_set(lowerCamelCase__ )
if item != self.parent[item]:
__lowerCamelCase = self.find(self.parent[item] )
return self.parent[item]
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> int:
'''simple docstring'''
__lowerCamelCase = self.find(lowerCamelCase__ )
__lowerCamelCase = self.find(lowerCamelCase__ )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
__lowerCamelCase = roota
return roota
if self.rank[roota] < self.rank[roota]:
__lowerCamelCase = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
__lowerCamelCase = roota
return roota
return None
@staticmethod
def lowercase_ ( lowerCamelCase__ ) -> str:
'''simple docstring'''
__lowerCamelCase = graph.num_vertices
__lowerCamelCase = Graph.UnionFind()
__lowerCamelCase = []
while num_components > 1:
__lowerCamelCase = {}
for vertex in graph.get_vertices():
__lowerCamelCase = -1
__lowerCamelCase = graph.get_edges()
for edge in edges:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = edge
edges.remove((tail, head, weight) )
for edge in edges:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = edge
__lowerCamelCase = union_find.find(lowerCamelCase__ )
__lowerCamelCase = union_find.find(lowerCamelCase__ )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
__lowerCamelCase = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
__lowerCamelCase = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = cheap_edge[vertex]
if union_find.find(lowerCamelCase__ ) != union_find.find(lowerCamelCase__ ):
union_find.union(lowerCamelCase__ , lowerCamelCase__ )
mst_edges.append(cheap_edge[vertex] )
__lowerCamelCase = num_components - 1
__lowerCamelCase = Graph.build(edges=lowerCamelCase__ )
return mst
| 90 | 0 |
'''simple docstring'''
__snake_case : int = 8.314462 # Unit - J mol-1 K-1
def __lowerCamelCase ( __snake_case : float, __snake_case : float, __snake_case : float ) -> float:
"""simple docstring"""
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError("""Invalid inputs. Enter positive value.""" )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def __lowerCamelCase ( __snake_case : float, __snake_case : float, __snake_case : float ) -> float:
"""simple docstring"""
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError("""Invalid inputs. Enter positive value.""" )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
| 359 |
'''simple docstring'''
from __future__ import annotations
def __lowerCamelCase ( __snake_case : list[int] ) -> bool:
"""simple docstring"""
return len(set(__snake_case ) ) == len(__snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 136 | 0 |
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[int] , __UpperCamelCase : List[str] , __UpperCamelCase : str , __UpperCamelCase : Tuple=None , __UpperCamelCase : List[Any]=None ) -> Union[str, Any]:
"""simple docstring"""
if "." in tensor_name:
SCREAMING_SNAKE_CASE__ = tensor_name.split(""".""" )
for split in splits[:-1]:
SCREAMING_SNAKE_CASE__ = getattr(__UpperCamelCase , __UpperCamelCase )
if new_module is None:
raise ValueError(f"""{module} has no attribute {split}.""" )
SCREAMING_SNAKE_CASE__ = new_module
SCREAMING_SNAKE_CASE__ = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(f"""{module} does not have a parameter or a buffer named {tensor_name}.""" )
SCREAMING_SNAKE_CASE__ = tensor_name in module._buffers
SCREAMING_SNAKE_CASE__ = getattr(__UpperCamelCase , __UpperCamelCase )
if old_value.device == torch.device("""meta""" ) and device not in ["meta", torch.device("""meta""" )] and value is None:
raise ValueError(f"""{tensor_name} is on the meta device, we need a `value` to put in on {device}.""" )
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
if is_buffer or not is_bitsandbytes_available():
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
else:
SCREAMING_SNAKE_CASE__ = hasattr(bnb.nn , """Params4bit""" ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit )
SCREAMING_SNAKE_CASE__ = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams )
if is_abit or is_abit:
SCREAMING_SNAKE_CASE__ = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
SCREAMING_SNAKE_CASE__ = old_value.to(__UpperCamelCase )
elif isinstance(__UpperCamelCase , torch.Tensor ):
SCREAMING_SNAKE_CASE__ = value.to("""cpu""" )
if value.dtype == torch.inta:
SCREAMING_SNAKE_CASE__ = version.parse(importlib.metadata.version("""bitsandbytes""" ) ) > version.parse(
"""0.37.2""" )
if not is_abit_serializable:
raise ValueError(
"""Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. """
"""Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.""" )
else:
SCREAMING_SNAKE_CASE__ = torch.tensor(__UpperCamelCase , device="""cpu""" )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls , __UpperCamelCase ) and fpaa_statistics is None:
SCREAMING_SNAKE_CASE__ = new_value.T
SCREAMING_SNAKE_CASE__ = old_value.__dict__
if is_abit:
SCREAMING_SNAKE_CASE__ = bnb.nn.IntaParams(__UpperCamelCase , requires_grad=__UpperCamelCase , **__UpperCamelCase ).to(__UpperCamelCase )
elif is_abit:
SCREAMING_SNAKE_CASE__ = bnb.nn.Paramsabit(__UpperCamelCase , requires_grad=__UpperCamelCase , **__UpperCamelCase ).to(__UpperCamelCase )
SCREAMING_SNAKE_CASE__ = new_value
if fpaa_statistics is not None:
setattr(module.weight , """SCB""" , fpaa_statistics.to(__UpperCamelCase ) )
else:
if value is None:
SCREAMING_SNAKE_CASE__ = old_value.to(__UpperCamelCase )
elif isinstance(__UpperCamelCase , torch.Tensor ):
SCREAMING_SNAKE_CASE__ = value.to(__UpperCamelCase )
else:
SCREAMING_SNAKE_CASE__ = torch.tensor(__UpperCamelCase , device=__UpperCamelCase )
if is_buffer:
SCREAMING_SNAKE_CASE__ = new_value
else:
SCREAMING_SNAKE_CASE__ = nn.Parameter(__UpperCamelCase , requires_grad=old_value.requires_grad )
SCREAMING_SNAKE_CASE__ = new_value
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[int] , __UpperCamelCase : str=None , __UpperCamelCase : Optional[Any]=None , __UpperCamelCase : List[str]=None , __UpperCamelCase : Optional[Any]=False ) -> str:
"""simple docstring"""
for name, module in model.named_children():
if current_key_name is None:
SCREAMING_SNAKE_CASE__ = []
current_key_name.append(__UpperCamelCase )
if (isinstance(__UpperCamelCase , nn.Linear ) or isinstance(__UpperCamelCase , __UpperCamelCase )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in """.""".join(__UpperCamelCase ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(__UpperCamelCase , __UpperCamelCase ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = module.weight.shape
else:
SCREAMING_SNAKE_CASE__ = module.in_features
SCREAMING_SNAKE_CASE__ = module.out_features
if quantization_config.quantization_method() == "llm_int8":
SCREAMING_SNAKE_CASE__ = bnb.nn.LinearabitLt(
__UpperCamelCase , __UpperCamelCase , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , )
SCREAMING_SNAKE_CASE__ = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
SCREAMING_SNAKE_CASE__ = bnb.nn.Linearabit(
__UpperCamelCase , __UpperCamelCase , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , )
SCREAMING_SNAKE_CASE__ = True
# Store the module class in case we need to transpose the weight later
SCREAMING_SNAKE_CASE__ = type(__UpperCamelCase )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(__UpperCamelCase )
if len(list(module.children() ) ) > 0:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = _replace_with_bnb_linear(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , has_been_replaced=__UpperCamelCase , )
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : str , __UpperCamelCase : int=None , __UpperCamelCase : List[str]=None , __UpperCamelCase : str=None ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ["""lm_head"""] if modules_to_not_convert is None else modules_to_not_convert
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = _replace_with_bnb_linear(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""" )
return model
def __SCREAMING_SNAKE_CASE ( *__UpperCamelCase : Optional[int] , **__UpperCamelCase : Dict ) -> Union[str, Any]:
"""simple docstring"""
warnings.warn(
"""`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead""" , __UpperCamelCase , )
return replace_with_bnb_linear(*__UpperCamelCase , **__UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( *__UpperCamelCase : int , **__UpperCamelCase : Union[str, Any] ) -> str:
"""simple docstring"""
warnings.warn(
"""`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead""" , __UpperCamelCase , )
return set_module_quantized_tensor_to_device(*__UpperCamelCase , **__UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = deepcopy(__UpperCamelCase ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
tied_model.tie_weights()
SCREAMING_SNAKE_CASE__ = find_tied_parameters(__UpperCamelCase )
# For compatibility with Accelerate < 0.18
if isinstance(__UpperCamelCase , __UpperCamelCase ):
SCREAMING_SNAKE_CASE__ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
SCREAMING_SNAKE_CASE__ = sum(__UpperCamelCase , [] )
SCREAMING_SNAKE_CASE__ = len(__UpperCamelCase ) > 0
# Check if it is a base model
SCREAMING_SNAKE_CASE__ = not hasattr(__UpperCamelCase , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
SCREAMING_SNAKE_CASE__ = list(model.named_children() )
SCREAMING_SNAKE_CASE__ = [list_modules[-1][0]]
# add last module together with tied weights
SCREAMING_SNAKE_CASE__ = set(__UpperCamelCase ) - set(__UpperCamelCase )
SCREAMING_SNAKE_CASE__ = list(set(__UpperCamelCase ) ) + list(__UpperCamelCase )
# remove ".weight" from the keys
SCREAMING_SNAKE_CASE__ = [""".weight""", """.bias"""]
SCREAMING_SNAKE_CASE__ = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
SCREAMING_SNAKE_CASE__ = name.replace(__UpperCamelCase , """""" )
filtered_module_names.append(__UpperCamelCase )
return filtered_module_names
| 219 | import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''0.12.2'''):
raise Exception('''requires fairseq >= 0.12.2''')
if version.parse(fairseq.__version__) > version.parse('''2'''):
raise Exception('''requires fairseq < v2''')
logging.set_verbosity_info()
__lowerCamelCase : Dict = logging.get_logger(__name__)
__lowerCamelCase : Dict = '''Hello, World!'''
__lowerCamelCase : Optional[Any] = '''en_XX'''
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : str , __UpperCamelCase : str , __UpperCamelCase : bool ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = Path("""data_bin""" )
SCREAMING_SNAKE_CASE__ = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(__UpperCamelCase ).parent ) , checkpoint_file=Path(__UpperCamelCase ).name , _name="""xmod_base""" , arch="""xmod_base""" , task="""multilingual_masked_lm""" , data_name_or_path=str(__UpperCamelCase ) , bpe="""sentencepiece""" , sentencepiece_model=str(Path(__UpperCamelCase ).parent / """sentencepiece.bpe.model""" ) , src_dict=str(data_dir / """dict.txt""" ) , )
xmod.eval() # disable dropout
print(__UpperCamelCase )
SCREAMING_SNAKE_CASE__ = xmod.model.encoder.sentence_encoder
SCREAMING_SNAKE_CASE__ = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_14 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , """bottleneck""" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
SCREAMING_SNAKE_CASE__ = xmod.model.classification_heads["""mnli"""].out_proj.weight.shape[0]
print("""Our X-MOD config:""" , __UpperCamelCase )
SCREAMING_SNAKE_CASE__ = XmodForSequenceClassification(__UpperCamelCase ) if classification_head else XmodForMaskedLM(__UpperCamelCase )
model.eval()
# Now let's copy all the weights.
# Embeddings
SCREAMING_SNAKE_CASE__ = xmod_sent_encoder.embed_tokens.weight
SCREAMING_SNAKE_CASE__ = xmod_sent_encoder.embed_positions.weight
SCREAMING_SNAKE_CASE__ = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
SCREAMING_SNAKE_CASE__ = xmod_sent_encoder.layernorm_embedding.weight
SCREAMING_SNAKE_CASE__ = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
SCREAMING_SNAKE_CASE__ = model.roberta.encoder.layer[i]
SCREAMING_SNAKE_CASE__ = xmod_sent_encoder.layers[i]
# self attention
SCREAMING_SNAKE_CASE__ = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("""Dimensions of self-attention weights do not match.""" )
SCREAMING_SNAKE_CASE__ = xmod_layer.self_attn.q_proj.weight
SCREAMING_SNAKE_CASE__ = xmod_layer.self_attn.q_proj.bias
SCREAMING_SNAKE_CASE__ = xmod_layer.self_attn.k_proj.weight
SCREAMING_SNAKE_CASE__ = xmod_layer.self_attn.k_proj.bias
SCREAMING_SNAKE_CASE__ = xmod_layer.self_attn.v_proj.weight
SCREAMING_SNAKE_CASE__ = xmod_layer.self_attn.v_proj.bias
# self-attention output
SCREAMING_SNAKE_CASE__ = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("""Dimensions of self-attention output weights do not match.""" )
SCREAMING_SNAKE_CASE__ = xmod_layer.self_attn.out_proj.weight
SCREAMING_SNAKE_CASE__ = xmod_layer.self_attn.out_proj.bias
SCREAMING_SNAKE_CASE__ = xmod_layer.self_attn_layer_norm.weight
SCREAMING_SNAKE_CASE__ = xmod_layer.self_attn_layer_norm.bias
# intermediate
SCREAMING_SNAKE_CASE__ = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("""Dimensions of intermediate weights do not match.""" )
SCREAMING_SNAKE_CASE__ = xmod_layer.fca.weight
SCREAMING_SNAKE_CASE__ = xmod_layer.fca.bias
# output
SCREAMING_SNAKE_CASE__ = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("""Dimensions of feed-forward weights do not match.""" )
SCREAMING_SNAKE_CASE__ = xmod_layer.fca.weight
SCREAMING_SNAKE_CASE__ = xmod_layer.fca.bias
SCREAMING_SNAKE_CASE__ = xmod_layer.final_layer_norm.weight
SCREAMING_SNAKE_CASE__ = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
SCREAMING_SNAKE_CASE__ = xmod_layer.adapter_layer_norm.weight
SCREAMING_SNAKE_CASE__ = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("""Lists of language adapters do not match.""" )
for lang_code, adapter in xmod_layer.adapter_modules.items():
SCREAMING_SNAKE_CASE__ = bert_output.adapter_modules[lang_code]
SCREAMING_SNAKE_CASE__ = xmod_layer.adapter_modules[lang_code]
SCREAMING_SNAKE_CASE__ = from_adapter.fca.weight
SCREAMING_SNAKE_CASE__ = from_adapter.fca.bias
SCREAMING_SNAKE_CASE__ = from_adapter.fca.weight
SCREAMING_SNAKE_CASE__ = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
SCREAMING_SNAKE_CASE__ = xmod_sent_encoder.layer_norm.weight
SCREAMING_SNAKE_CASE__ = xmod_sent_encoder.layer_norm.bias
if classification_head:
SCREAMING_SNAKE_CASE__ = xmod.model.classification_heads["""mnli"""].dense.weight
SCREAMING_SNAKE_CASE__ = xmod.model.classification_heads["""mnli"""].dense.bias
SCREAMING_SNAKE_CASE__ = xmod.model.classification_heads["""mnli"""].out_proj.weight
SCREAMING_SNAKE_CASE__ = xmod.model.classification_heads["""mnli"""].out_proj.bias
else:
# LM Head
SCREAMING_SNAKE_CASE__ = xmod.model.encoder.lm_head.dense.weight
SCREAMING_SNAKE_CASE__ = xmod.model.encoder.lm_head.dense.bias
SCREAMING_SNAKE_CASE__ = xmod.model.encoder.lm_head.layer_norm.weight
SCREAMING_SNAKE_CASE__ = xmod.model.encoder.lm_head.layer_norm.bias
SCREAMING_SNAKE_CASE__ = xmod.model.encoder.lm_head.weight
SCREAMING_SNAKE_CASE__ = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
SCREAMING_SNAKE_CASE__ = xmod.encode(__UpperCamelCase ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(__UpperCamelCase )
SCREAMING_SNAKE_CASE__ = model(__UpperCamelCase )[0]
if classification_head:
SCREAMING_SNAKE_CASE__ = xmod.model.classification_heads["""mnli"""](xmod.extract_features(__UpperCamelCase ) )
else:
SCREAMING_SNAKE_CASE__ = xmod.model(__UpperCamelCase , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
SCREAMING_SNAKE_CASE__ = torch.max(torch.abs(our_output - their_output ) ).item()
print(f"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
SCREAMING_SNAKE_CASE__ = torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1E-3 )
print("""Do both models output the same tensors?""" , """🔥""" if success else """💩""" )
if not success:
raise Exception("""Something went wRoNg""" )
Path(__UpperCamelCase ).mkdir(parents=__UpperCamelCase , exist_ok=__UpperCamelCase )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__lowerCamelCase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
__lowerCamelCase : str = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 219 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowercase__ : Dict = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE (UpperCamelCase__ ):
lowerCAmelCase = ["""pixel_values"""]
def __init__( self , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = PILImageResampling.BICUBIC , _UpperCAmelCase = True , _UpperCAmelCase = True , _UpperCAmelCase = 1 / 255 , _UpperCAmelCase = None , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = None , **_UpperCAmelCase , ):
'''simple docstring'''
super().__init__(**__a)
__A : List[Any] = size if size is not None else {'height': 224, 'width': 224}
__A : List[Any] = get_size_dict(__a)
__A : str = crop_size if crop_size is not None else {'height': 224, 'width': 224}
__A : List[Any] = get_size_dict(__a , default_to_square=__a , param_name='crop_size')
__A : Optional[int] = do_resize
__A : Tuple = do_rescale
__A : Optional[Any] = do_normalize
__A : int = do_center_crop
__A : Union[str, Any] = crop_size
__A : Union[str, Any] = size
__A : str = resample
__A : Any = rescale_factor
__A : Any = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__A : List[str] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = PILImageResampling.BILINEAR , _UpperCAmelCase = None , **_UpperCAmelCase , ):
'''simple docstring'''
__A : List[str] = get_size_dict(__a)
if "shortest_edge" in size:
__A : Optional[int] = get_resize_output_image_size(__a , size=size['shortest_edge'] , default_to_square=__a)
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
__A : Optional[int] = (size['height'], size['width'])
else:
raise ValueError(F'Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}')
return resize(__a , size=__a , resample=__a , data_format=__a , **__a)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ):
'''simple docstring'''
__A : Dict = get_size_dict(__a)
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}')
return center_crop(__a , size=(size['height'], size['width']) , data_format=__a , **__a)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase):
'''simple docstring'''
return rescale(__a , scale=__a , data_format=__a , **__a)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ):
'''simple docstring'''
return normalize(__a , mean=__a , std=__a , data_format=__a , **__a)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = ChannelDimension.FIRST , **_UpperCAmelCase , ):
'''simple docstring'''
__A : Any = do_resize if do_resize is not None else self.do_resize
__A : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
__A : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
__A : int = do_center_crop if do_center_crop is not None else self.do_center_crop
__A : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
__A : Dict = get_size_dict(__a , param_name='crop_size' , default_to_square=__a)
__A : str = resample if resample is not None else self.resample
__A : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor
__A : Dict = image_mean if image_mean is not None else self.image_mean
__A : Tuple = image_std if image_std is not None else self.image_std
__A : Optional[int] = size if size is not None else self.size
__A : Any = get_size_dict(__a)
if not is_batched(__a):
__A : Any = [images]
if not valid_images(__a):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
# All transformations expect numpy arrays.
__A : str = [to_numpy_array(__a) for image in images]
if do_resize:
__A : Tuple = [self.resize(image=__a , size=__a , resample=__a) for image in images]
if do_center_crop:
__A : List[Any] = [self.center_crop(image=__a , size=__a) for image in images]
if do_rescale:
__A : Tuple = [self.rescale(image=__a , scale=__a) for image in images]
if do_normalize:
__A : List[Any] = [self.normalize(image=__a , mean=__a , std=__a) for image in images]
__A : int = [to_channel_dimension_format(__a , __a) for image in images]
__A : List[Any] = {'pixel_values': images}
return BatchFeature(data=__a , tensor_type=__a) | 368 |
'''simple docstring'''
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = 0
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
__A : List[str] = AutoTokenizer.from_pretrained(_UpperCAmelCase)
self.assertIsNotNone(_UpperCAmelCase)
self.assertIsInstance(_UpperCAmelCase , (BertTokenizer, BertTokenizerFast))
self.assertGreater(len(_UpperCAmelCase) , 0)
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
__A : Any = AutoTokenizer.from_pretrained(_UpperCAmelCase)
self.assertIsNotNone(_UpperCAmelCase)
self.assertIsInstance(_UpperCAmelCase , (GPTaTokenizer, GPTaTokenizerFast))
self.assertGreater(len(_UpperCAmelCase) , 0)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = AutoTokenizer.from_pretrained(_UpperCAmelCase)
self.assertIsInstance(_UpperCAmelCase , (BertTokenizer, BertTokenizerFast))
self.assertEqual(tokenizer.vocab_size , 12)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[Any] = AutoTokenizer.from_pretrained(_UpperCAmelCase)
self.assertIsInstance(_UpperCAmelCase , (RobertaTokenizer, RobertaTokenizerFast))
self.assertEqual(tokenizer.vocab_size , 20)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = AutoConfig.from_pretrained(_UpperCAmelCase)
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase)
# Check that tokenizer_type ≠ model_type
__A : Optional[Any] = AutoTokenizer.from_pretrained(_UpperCAmelCase , config=_UpperCAmelCase)
self.assertIsInstance(_UpperCAmelCase , (BertTokenizer, BertTokenizerFast))
self.assertEqual(tokenizer.vocab_size , 12)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.txt' , os.path.join(_UpperCAmelCase , 'vocab.txt'))
__A : Union[str, Any] = AutoTokenizer.from_pretrained(_UpperCAmelCase , tokenizer_type='bert' , use_fast=_UpperCAmelCase)
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase)
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.json' , os.path.join(_UpperCAmelCase , 'vocab.json'))
shutil.copy('./tests/fixtures/merges.txt' , os.path.join(_UpperCAmelCase , 'merges.txt'))
__A : str = AutoTokenizer.from_pretrained(_UpperCAmelCase , tokenizer_type='gpt2' , use_fast=_UpperCAmelCase)
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase)
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.txt' , os.path.join(_UpperCAmelCase , 'vocab.txt'))
__A : List[str] = AutoTokenizer.from_pretrained(_UpperCAmelCase , tokenizer_type='bert')
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase)
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.json' , os.path.join(_UpperCAmelCase , 'vocab.json'))
shutil.copy('./tests/fixtures/merges.txt' , os.path.join(_UpperCAmelCase , 'merges.txt'))
__A : List[str] = AutoTokenizer.from_pretrained(_UpperCAmelCase , tokenizer_type='gpt2')
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with pytest.raises(_UpperCAmelCase):
AutoTokenizer.from_pretrained('./' , tokenizer_type='xxx')
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
__A : List[Any] = tokenizer_class.from_pretrained('wietsedv/bert-base-dutch-cased')
self.assertIsInstance(_UpperCAmelCase , (BertTokenizer, BertTokenizerFast))
if isinstance(_UpperCAmelCase , _UpperCAmelCase):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , _UpperCAmelCase)
else:
self.assertEqual(tokenizer.do_lower_case , _UpperCAmelCase)
self.assertEqual(tokenizer.model_max_length , 512)
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
_UpperCAmelCase , 'julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier' , ):
__A : str = tokenizer_class.from_pretrained('julien-c/herlolip-not-exists')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = TOKENIZER_MAPPING.values()
__A : Union[str, Any] = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__)
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__)
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(_UpperCAmelCase)
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased' , use_fast=_UpperCAmelCase) , _UpperCAmelCase)
self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased') , _UpperCAmelCase)
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = AutoTokenizer.from_pretrained('distilbert-base-uncased' , do_lower_case=_UpperCAmelCase)
__A : str = 'Hello, world. How are you?'
__A : List[str] = tokenizer.tokenize(_UpperCAmelCase)
self.assertEqual('[UNK]' , tokens[0])
__A : Dict = AutoTokenizer.from_pretrained('microsoft/mpnet-base' , do_lower_case=_UpperCAmelCase)
__A : List[Any] = tokenizer.tokenize(_UpperCAmelCase)
self.assertEqual('[UNK]' , tokens[0])
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = AutoTokenizer.from_pretrained('robot-test/dummy-tokenizer-fast-with-model-config')
self.assertEqual(type(_UpperCAmelCase) , _UpperCAmelCase)
self.assertEqual(tokenizer.model_max_length , 512)
self.assertEqual(tokenizer.vocab_size , 3_0000)
self.assertEqual(tokenizer.unk_token , '[UNK]')
self.assertEqual(tokenizer.padding_side , 'right')
self.assertEqual(tokenizer.truncation_side , 'right')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[Any] = AutoTokenizer.from_pretrained(_UpperCAmelCase)
self.assertIsInstance(_UpperCAmelCase , (BertTokenizer, BertTokenizerFast))
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_UpperCAmelCase)
__A : Any = AutoTokenizer.from_pretrained(_UpperCAmelCase)
self.assertIsInstance(_UpperCAmelCase , tokenizer.__class__)
self.assertEqual(tokenizera.vocab_size , 12)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = AutoTokenizer.from_pretrained('ctrl')
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = get_tokenizer_config('bert-base-cased')
__A : Optional[int] = config.pop('_commit_hash' , _UpperCAmelCase)
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(_UpperCAmelCase , {'do_lower_case': False})
# This model does not have a tokenizer_config so we get back an empty dict.
__A : Dict = get_tokenizer_config(_UpperCAmelCase)
self.assertDictEqual(_UpperCAmelCase , {})
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
__A : Any = AutoTokenizer.from_pretrained(_UpperCAmelCase)
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_UpperCAmelCase)
__A : Any = get_tokenizer_config(_UpperCAmelCase)
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['tokenizer_class'] , 'BertTokenizer')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
try:
AutoConfig.register('custom' , _UpperCAmelCase)
AutoTokenizer.register(_UpperCAmelCase , slow_tokenizer_class=_UpperCAmelCase)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_UpperCAmelCase):
AutoTokenizer.register(_UpperCAmelCase , slow_tokenizer_class=_UpperCAmelCase)
__A : Optional[Any] = CustomTokenizer.from_pretrained(_UpperCAmelCase)
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_UpperCAmelCase)
__A : int = AutoTokenizer.from_pretrained(_UpperCAmelCase)
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
try:
AutoConfig.register('custom' , _UpperCAmelCase)
# Can register in two steps
AutoTokenizer.register(_UpperCAmelCase , slow_tokenizer_class=_UpperCAmelCase)
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None))
AutoTokenizer.register(_UpperCAmelCase , fast_tokenizer_class=_UpperCAmelCase)
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast))
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
_UpperCAmelCase , slow_tokenizer_class=_UpperCAmelCase , fast_tokenizer_class=_UpperCAmelCase)
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast))
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_UpperCAmelCase):
AutoTokenizer.register(_UpperCAmelCase , fast_tokenizer_class=_UpperCAmelCase)
# We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
__A : Optional[int] = BertTokenizerFast.from_pretrained(_UpperCAmelCase)
bert_tokenizer.save_pretrained(_UpperCAmelCase)
__A : Dict = CustomTokenizerFast.from_pretrained(_UpperCAmelCase)
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_UpperCAmelCase)
__A : Union[str, Any] = AutoTokenizer.from_pretrained(_UpperCAmelCase)
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase)
__A : Any = AutoTokenizer.from_pretrained(_UpperCAmelCase , use_fast=_UpperCAmelCase)
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with self.assertRaises(_UpperCAmelCase):
__A : List[str] = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer')
# If remote code is disabled, we can't load this config.
with self.assertRaises(_UpperCAmelCase):
__A : Dict = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_UpperCAmelCase)
__A : str = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_UpperCAmelCase)
self.assertTrue(tokenizer.special_attribute_present)
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_UpperCAmelCase)
__A : Dict = AutoTokenizer.from_pretrained(_UpperCAmelCase , trust_remote_code=_UpperCAmelCase)
self.assertTrue(reloaded_tokenizer.special_attribute_present)
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast')
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizerFast')
# Test we can also load the slow version
__A : Union[str, Any] = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_UpperCAmelCase , use_fast=_UpperCAmelCase)
self.assertTrue(tokenizer.special_attribute_present)
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_UpperCAmelCase)
__A : Union[str, Any] = AutoTokenizer.from_pretrained(_UpperCAmelCase , trust_remote_code=_UpperCAmelCase , use_fast=_UpperCAmelCase)
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizer')
self.assertTrue(reloaded_tokenizer.special_attribute_present)
else:
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizer')
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = False
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = NewTokenizer
lowerCAmelCase = False
try:
AutoConfig.register('custom' , _UpperCAmelCase)
AutoTokenizer.register(_UpperCAmelCase , slow_tokenizer_class=_UpperCAmelCase)
AutoTokenizer.register(_UpperCAmelCase , fast_tokenizer_class=_UpperCAmelCase)
# If remote code is not set, the default is to use local
__A : List[Any] = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer')
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast')
self.assertFalse(tokenizer.special_attribute_present)
__A : Dict = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' , use_fast=_UpperCAmelCase)
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
self.assertFalse(tokenizer.special_attribute_present)
# If remote code is disabled, we load the local one.
__A : Optional[Any] = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_UpperCAmelCase)
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast')
self.assertFalse(tokenizer.special_attribute_present)
__A : Any = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_UpperCAmelCase , use_fast=_UpperCAmelCase)
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
self.assertFalse(tokenizer.special_attribute_present)
# If remote is enabled, we load from the Hub
__A : int = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_UpperCAmelCase)
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast')
self.assertTrue(tokenizer.special_attribute_present)
__A : Optional[Any] = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_UpperCAmelCase , use_fast=_UpperCAmelCase)
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
self.assertTrue(tokenizer.special_attribute_present)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer_legacy' , trust_remote_code=_UpperCAmelCase)
self.assertTrue(tokenizer.special_attribute_present)
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast')
# Test we can also load the slow version
__A : int = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer_legacy' , trust_remote_code=_UpperCAmelCase , use_fast=_UpperCAmelCase)
self.assertTrue(tokenizer.special_attribute_present)
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
else:
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with self.assertRaisesRegex(
_UpperCAmelCase , 'bert-base is not a local folder and is not a valid model identifier'):
__A : Union[str, Any] = AutoTokenizer.from_pretrained('bert-base')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with self.assertRaisesRegex(
_UpperCAmelCase , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'):
__A : Union[str, Any] = AutoTokenizer.from_pretrained(_UpperCAmelCase , revision='aaaaaa')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert')
with RequestCounter() as counter:
__A : Union[str, Any] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert')
self.assertEqual(counter.get_request_count , 0)
self.assertEqual(counter.head_request_count , 1)
self.assertEqual(counter.other_request_count , 0) | 190 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowercase ( __UpperCAmelCase):
__lowerCAmelCase : Optional[Any] = ["""image_processor""", """tokenizer"""]
__lowerCAmelCase : List[str] = """Pix2StructImageProcessor"""
__lowerCAmelCase : Optional[int] = ("""T5Tokenizer""", """T5TokenizerFast""")
def __init__( self : Any , _lowerCamelCase : List[str] , _lowerCamelCase : Dict ):
"""simple docstring"""
A_ : List[str] = False
super().__init__(_lowerCamelCase , _lowerCamelCase )
def __call__( self : Union[str, Any] , _lowerCamelCase : Union[str, Any]=None , _lowerCamelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _lowerCamelCase : bool = True , _lowerCamelCase : Union[bool, str, PaddingStrategy] = False , _lowerCamelCase : Union[bool, str, TruncationStrategy] = None , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : Optional[int] = 20_48 , _lowerCamelCase : int = 0 , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : Optional[bool] = None , _lowerCamelCase : bool = False , _lowerCamelCase : bool = False , _lowerCamelCase : bool = False , _lowerCamelCase : bool = False , _lowerCamelCase : bool = False , _lowerCamelCase : bool = True , _lowerCamelCase : Optional[Union[str, TensorType]] = None , **_lowerCamelCase : Optional[Any] , ):
"""simple docstring"""
if images is None and text is None:
raise ValueError('''You have to specify either images or text.''' )
# Get only text
if images is None and not self.image_processor.is_vqa:
A_ : str = self.tokenizer
A_ : Union[str, Any] = self.tokenizer(
text=_lowerCamelCase , add_special_tokens=_lowerCamelCase , padding=_lowerCamelCase , truncation=_lowerCamelCase , max_length=_lowerCamelCase , stride=_lowerCamelCase , pad_to_multiple_of=_lowerCamelCase , return_attention_mask=_lowerCamelCase , return_overflowing_tokens=_lowerCamelCase , return_special_tokens_mask=_lowerCamelCase , return_offsets_mapping=_lowerCamelCase , return_token_type_ids=_lowerCamelCase , return_length=_lowerCamelCase , verbose=_lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase , )
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
A_ : Tuple = self.image_processor(
_lowerCamelCase , return_tensors=_lowerCamelCase , max_patches=_lowerCamelCase , **_lowerCamelCase )
else:
# add pixel_values and bbox
A_ : Any = self.image_processor(
_lowerCamelCase , return_tensors=_lowerCamelCase , max_patches=_lowerCamelCase , header_text=_lowerCamelCase , **_lowerCamelCase )
if text is not None and not self.image_processor.is_vqa:
A_ : Dict = self.tokenizer(
text=_lowerCamelCase , add_special_tokens=_lowerCamelCase , padding=_lowerCamelCase , truncation=_lowerCamelCase , max_length=_lowerCamelCase , stride=_lowerCamelCase , pad_to_multiple_of=_lowerCamelCase , return_attention_mask=_lowerCamelCase , return_overflowing_tokens=_lowerCamelCase , return_special_tokens_mask=_lowerCamelCase , return_offsets_mapping=_lowerCamelCase , return_token_type_ids=_lowerCamelCase , return_length=_lowerCamelCase , verbose=_lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase , )
if "attention_mask" in text_encoding:
A_ : Dict = text_encoding.pop('''attention_mask''' )
if "input_ids" in text_encoding:
A_ : Dict = text_encoding.pop('''input_ids''' )
else:
A_ : List[str] = None
if text_encoding is not None:
encoding_image_processor.update(_lowerCamelCase )
return encoding_image_processor
def a_ ( self : Optional[Any] , *_lowerCamelCase : Tuple , **_lowerCamelCase : List[Any] ):
"""simple docstring"""
return self.tokenizer.batch_decode(*_lowerCamelCase , **_lowerCamelCase )
def a_ ( self : Optional[int] , *_lowerCamelCase : Tuple , **_lowerCamelCase : Optional[Any] ):
"""simple docstring"""
return self.tokenizer.decode(*_lowerCamelCase , **_lowerCamelCase )
@property
def a_ ( self : Tuple ):
"""simple docstring"""
A_ : Any = self.tokenizer.model_input_names
A_ : Optional[int] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 167 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCamelCase : int = {
'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json',
}
class lowercase ( __UpperCAmelCase):
__lowerCAmelCase : Union[str, Any] = """transfo-xl"""
__lowerCAmelCase : Optional[Any] = ["""mems"""]
__lowerCAmelCase : List[str] = {
"""n_token""": """vocab_size""",
"""hidden_size""": """d_model""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : int , _lowerCamelCase : List[Any]=26_77_35 , _lowerCamelCase : Any=[2_00_00, 4_00_00, 20_00_00] , _lowerCamelCase : str=10_24 , _lowerCamelCase : Union[str, Any]=10_24 , _lowerCamelCase : Union[str, Any]=16 , _lowerCamelCase : int=64 , _lowerCamelCase : Optional[int]=40_96 , _lowerCamelCase : Optional[int]=4 , _lowerCamelCase : str=False , _lowerCamelCase : Union[str, Any]=18 , _lowerCamelCase : Optional[Any]=16_00 , _lowerCamelCase : Optional[int]=10_00 , _lowerCamelCase : Optional[int]=True , _lowerCamelCase : Any=True , _lowerCamelCase : Tuple=0 , _lowerCamelCase : List[Any]=-1 , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : List[str]=0.1 , _lowerCamelCase : List[Any]=0.0 , _lowerCamelCase : List[Any]=True , _lowerCamelCase : List[str]="normal" , _lowerCamelCase : int=0.01 , _lowerCamelCase : List[str]=0.01 , _lowerCamelCase : Optional[Any]=0.02 , _lowerCamelCase : int=1E-5 , _lowerCamelCase : int=0 , **_lowerCamelCase : Union[str, Any] , ):
"""simple docstring"""
A_ : Optional[Any] = vocab_size
A_ : str = []
self.cutoffs.extend(_lowerCamelCase )
if proj_share_all_but_first:
A_ : str = [False] + [True] * len(self.cutoffs )
else:
A_ : str = [False] + [False] * len(self.cutoffs )
A_ : Optional[Any] = d_model
A_ : Dict = d_embed
A_ : List[str] = d_head
A_ : List[Any] = d_inner
A_ : Dict = div_val
A_ : int = pre_lnorm
A_ : Optional[Any] = n_layer
A_ : List[Any] = n_head
A_ : List[Any] = mem_len
A_ : Dict = same_length
A_ : Optional[Any] = attn_type
A_ : Any = clamp_len
A_ : Dict = sample_softmax
A_ : List[Any] = adaptive
A_ : Union[str, Any] = dropout
A_ : List[Any] = dropatt
A_ : Any = untie_r
A_ : Optional[int] = init
A_ : int = init_range
A_ : List[Any] = proj_init_std
A_ : Union[str, Any] = init_std
A_ : List[Any] = layer_norm_epsilon
super().__init__(eos_token_id=_lowerCamelCase , **_lowerCamelCase )
@property
def a_ ( self : Union[str, Any] ):
"""simple docstring"""
logger.info(F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
return -1
@max_position_embeddings.setter
def a_ ( self : Any , _lowerCamelCase : int ):
"""simple docstring"""
raise NotImplementedError(
F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| 167 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
UpperCAmelCase: Any = logging.get_logger(__name__)
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
_lowercase : Any = b.T
_lowercase : Union[str, Any] = np.sum(np.square(__UpperCAmelCase ) , axis=1 )
_lowercase : int = np.sum(np.square(__UpperCAmelCase ) , axis=0 )
_lowercase : Union[str, Any] = np.matmul(__UpperCAmelCase , __UpperCAmelCase )
_lowercase : int = aa[:, None] - 2 * ab + ba[None, :]
return d
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
_lowercase : Optional[Any] = x.reshape(-1 , 3 )
_lowercase : List[str] = squared_euclidean_distance(__UpperCAmelCase , __UpperCAmelCase )
return np.argmin(__UpperCAmelCase , axis=1 )
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = ["pixel_values"]
def __init__( self ,UpperCAmelCase_ = None ,UpperCAmelCase_ = True ,UpperCAmelCase_ = None ,UpperCAmelCase_ = PILImageResampling.BILINEAR ,UpperCAmelCase_ = True ,UpperCAmelCase_ = True ,**UpperCAmelCase_ ,):
super().__init__(**UpperCAmelCase_ )
_lowercase : Dict = size if size is not None else {"""height""": 2_56, """width""": 2_56}
_lowercase : Tuple = get_size_dict(UpperCAmelCase_ )
_lowercase : Optional[Any] = np.array(UpperCAmelCase_ ) if clusters is not None else None
_lowercase : Any = do_resize
_lowercase : Tuple = size
_lowercase : Dict = resample
_lowercase : Dict = do_normalize
_lowercase : Union[str, Any] = do_color_quantize
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = PILImageResampling.BILINEAR ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
_lowercase : List[str] = get_size_dict(UpperCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dictionary must contain both height and width keys. Got {size.keys()}""" )
return resize(
UpperCAmelCase_ ,size=(size["""height"""], size["""width"""]) ,resample=UpperCAmelCase_ ,data_format=UpperCAmelCase_ ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,):
_lowercase : Tuple = rescale(image=UpperCAmelCase_ ,scale=1 / 127.5 ,data_format=UpperCAmelCase_ )
_lowercase : Dict = image - 1
return image
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = ChannelDimension.FIRST ,**UpperCAmelCase_ ,):
_lowercase : Dict = do_resize if do_resize is not None else self.do_resize
_lowercase : Union[str, Any] = size if size is not None else self.size
_lowercase : str = get_size_dict(UpperCAmelCase_ )
_lowercase : int = resample if resample is not None else self.resample
_lowercase : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
_lowercase : Any = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
_lowercase : Any = clusters if clusters is not None else self.clusters
_lowercase : Any = np.array(UpperCAmelCase_ )
_lowercase : Optional[int] = make_list_of_images(UpperCAmelCase_ )
if not valid_images(UpperCAmelCase_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_color_quantize and clusters is None:
raise ValueError("""Clusters must be specified if do_color_quantize is True.""" )
# All transformations expect numpy arrays.
_lowercase : Optional[Any] = [to_numpy_array(UpperCAmelCase_ ) for image in images]
if do_resize:
_lowercase : int = [self.resize(image=UpperCAmelCase_ ,size=UpperCAmelCase_ ,resample=UpperCAmelCase_ ) for image in images]
if do_normalize:
_lowercase : Dict = [self.normalize(image=UpperCAmelCase_ ) for image in images]
if do_color_quantize:
_lowercase : List[Any] = [to_channel_dimension_format(UpperCAmelCase_ ,ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
_lowercase : int = np.array(UpperCAmelCase_ )
_lowercase : Optional[int] = color_quantize(UpperCAmelCase_ ,UpperCAmelCase_ ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
_lowercase : Dict = images.shape[0]
_lowercase : Optional[int] = images.reshape(UpperCAmelCase_ ,-1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
_lowercase : Tuple = list(UpperCAmelCase_ )
else:
_lowercase : List[str] = [to_channel_dimension_format(UpperCAmelCase_ ,UpperCAmelCase_ ) for image in images]
_lowercase : Tuple = {"""input_ids""": images}
return BatchFeature(data=UpperCAmelCase_ ,tensor_type=UpperCAmelCase_ )
| 360 |
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase ( snake_case , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = LEDTokenizer
SCREAMING_SNAKE_CASE_ : List[str] = LEDTokenizerFast
SCREAMING_SNAKE_CASE_ : List[str] = True
def lowerCamelCase__ ( self ):
super().setUp()
_lowercase : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
_lowercase : List[Any] = dict(zip(UpperCAmelCase_ ,range(len(UpperCAmelCase_ ) ) ) )
_lowercase : Optional[int] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_lowercase : Dict = {"""unk_token""": """<unk>"""}
_lowercase : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
_lowercase : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCAmelCase_ ) + """\n""" )
with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(UpperCAmelCase_ ) )
def lowerCamelCase__ ( self ,**UpperCAmelCase_ ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,**UpperCAmelCase_ ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
return "lower newer", "lower newer"
@cached_property
def lowerCamelCase__ ( self ):
return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" )
@cached_property
def lowerCamelCase__ ( self ):
return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" )
@require_torch
def lowerCamelCase__ ( self ):
_lowercase : Tuple = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
_lowercase : Any = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : Tuple = tokenizer(UpperCAmelCase_ ,max_length=len(UpperCAmelCase_ ) ,padding=UpperCAmelCase_ ,return_tensors="""pt""" )
self.assertIsInstance(UpperCAmelCase_ ,UpperCAmelCase_ )
self.assertEqual((2, 9) ,batch.input_ids.shape )
self.assertEqual((2, 9) ,batch.attention_mask.shape )
_lowercase : Optional[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(UpperCAmelCase_ ,UpperCAmelCase_ )
@require_torch
def lowerCamelCase__ ( self ):
_lowercase : Tuple = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : Dict = tokenizer(UpperCAmelCase_ ,padding=UpperCAmelCase_ ,return_tensors="""pt""" )
self.assertIn("""input_ids""" ,UpperCAmelCase_ )
self.assertIn("""attention_mask""" ,UpperCAmelCase_ )
self.assertNotIn("""labels""" ,UpperCAmelCase_ )
self.assertNotIn("""decoder_attention_mask""" ,UpperCAmelCase_ )
@require_torch
def lowerCamelCase__ ( self ):
_lowercase : Dict = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : Tuple = tokenizer(text_target=UpperCAmelCase_ ,max_length=32 ,padding="""max_length""" ,return_tensors="""pt""" )
self.assertEqual(32 ,targets["""input_ids"""].shape[1] )
@require_torch
def lowerCamelCase__ ( self ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : List[Any] = tokenizer(
["""I am a small frog""" * 10_24, """I am a small frog"""] ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,return_tensors="""pt""" )
self.assertIsInstance(UpperCAmelCase_ ,UpperCAmelCase_ )
self.assertEqual(batch.input_ids.shape ,(2, 51_22) )
@require_torch
def lowerCamelCase__ ( self ):
_lowercase : List[Any] = ["""A long paragraph for summarization."""]
_lowercase : Dict = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : Dict = tokenizer(UpperCAmelCase_ ,return_tensors="""pt""" )
_lowercase : List[str] = tokenizer(text_target=UpperCAmelCase_ ,return_tensors="""pt""" )
_lowercase : Union[str, Any] = inputs["""input_ids"""]
_lowercase : List[str] = targets["""input_ids"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def lowerCamelCase__ ( self ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : str = ["""Summary of the text.""", """Another summary."""]
_lowercase : Optional[int] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
_lowercase : Any = tokenizer(UpperCAmelCase_ ,padding=UpperCAmelCase_ )
_lowercase : str = [[0] * len(UpperCAmelCase_ ) for x in encoded_output["""input_ids"""]]
_lowercase : Optional[int] = tokenizer.pad(UpperCAmelCase_ )
self.assertSequenceEqual(outputs["""global_attention_mask"""] ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
pass
def lowerCamelCase__ ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_lowercase : int = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ )
_lowercase : Optional[int] = self.tokenizer_class.from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ )
_lowercase : Dict = """A, <mask> AllenNLP sentence."""
_lowercase : List[Any] = tokenizer_r.encode_plus(UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ )
_lowercase : Any = tokenizer_p.encode_plus(UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ )
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) ,sum(tokens_p["""token_type_ids"""] ) )
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) ,sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) ,)
_lowercase : str = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
_lowercase : str = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
self.assertSequenceEqual(tokens_p["""input_ids"""] ,[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] ,[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
UpperCAmelCase_ ,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
UpperCAmelCase_ ,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
| 336 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
__snake_case = logging.get_logger(__name__)
def A_ ( _lowerCAmelCase : Any, _lowerCAmelCase : Optional[int], _lowerCAmelCase : Dict ):
"""simple docstring"""
_a = WavaVecaForSequenceClassification.from_pretrained(_lowerCAmelCase, config=_lowerCAmelCase )
_a = downstream_dict['''projector.weight''']
_a = downstream_dict['''projector.bias''']
_a = downstream_dict['''model.post_net.linear.weight''']
_a = downstream_dict['''model.post_net.linear.bias''']
return model
def A_ ( _lowerCAmelCase : Optional[int], _lowerCAmelCase : Any, _lowerCAmelCase : List[Any] ):
"""simple docstring"""
_a = WavaVecaForAudioFrameClassification.from_pretrained(_lowerCAmelCase, config=_lowerCAmelCase )
_a = downstream_dict['''model.linear.weight''']
_a = downstream_dict['''model.linear.bias''']
return model
def A_ ( _lowerCAmelCase : Any, _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_a = WavaVecaForXVector.from_pretrained(_lowerCAmelCase, config=_lowerCAmelCase )
_a = downstream_dict['''connector.weight''']
_a = downstream_dict['''connector.bias''']
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
_a = downstream_dict[
f'model.framelevel_feature_extractor.module.{i}.kernel.weight'
]
_a = downstream_dict[f'model.framelevel_feature_extractor.module.{i}.kernel.bias']
_a = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight''']
_a = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias''']
_a = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight''']
_a = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias''']
_a = downstream_dict['''objective.W''']
return model
@torch.no_grad()
def A_ ( _lowerCAmelCase : Dict, _lowerCAmelCase : int, _lowerCAmelCase : Optional[int], _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_a = torch.load(_lowerCAmelCase, map_location='''cpu''' )
_a = checkpoint['''Downstream''']
_a = WavaVecaConfig.from_pretrained(_lowerCAmelCase )
_a = WavaVecaFeatureExtractor.from_pretrained(
_lowerCAmelCase, return_attention_mask=_lowerCAmelCase, do_normalize=_lowerCAmelCase )
_a = hf_config.architectures[0]
if arch.endswith('''ForSequenceClassification''' ):
_a = convert_classification(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase )
elif arch.endswith('''ForAudioFrameClassification''' ):
_a = convert_diarization(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase )
elif arch.endswith('''ForXVector''' ):
_a = convert_xvector(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase )
else:
raise NotImplementedError(f'S3PRL weights conversion is not supported for {arch}' )
if hf_config.use_weighted_layer_sum:
_a = checkpoint['''Featurizer''']['''weights''']
hf_feature_extractor.save_pretrained(_lowerCAmelCase )
hf_model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
)
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
__snake_case = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path) | 320 |
"""simple docstring"""
import os
import sys
import unittest
__snake_case = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
__snake_case = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
__snake_case = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase ( self ) -> str:
_a = get_test_to_tester_mapping(__UpperCAmelCase )
_a = get_test_to_tester_mapping(__UpperCAmelCase )
_a = {'''BertModelTest''': '''BertModelTester'''}
_a = {
'''BlipModelTest''': '''BlipModelTester''',
'''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''',
'''BlipTextModelTest''': '''BlipTextModelTester''',
'''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''',
'''BlipVQAModelTest''': '''BlipVQAModelTester''',
'''BlipVisionModelTest''': '''BlipVisionModelTester''',
}
self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
_a = get_model_to_test_mapping(__UpperCAmelCase )
_a = get_model_to_test_mapping(__UpperCAmelCase )
_a = {
'''BertForMaskedLM''': ['''BertModelTest'''],
'''BertForMultipleChoice''': ['''BertModelTest'''],
'''BertForNextSentencePrediction''': ['''BertModelTest'''],
'''BertForPreTraining''': ['''BertModelTest'''],
'''BertForQuestionAnswering''': ['''BertModelTest'''],
'''BertForSequenceClassification''': ['''BertModelTest'''],
'''BertForTokenClassification''': ['''BertModelTest'''],
'''BertLMHeadModel''': ['''BertModelTest'''],
'''BertModel''': ['''BertModelTest'''],
}
_a = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''],
'''BlipModel''': ['''BlipModelTest'''],
'''BlipTextModel''': ['''BlipTextModelTest'''],
'''BlipVisionModel''': ['''BlipVisionModelTest'''],
}
self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
_a = get_model_to_tester_mapping(__UpperCAmelCase )
_a = get_model_to_tester_mapping(__UpperCAmelCase )
_a = {
'''BertForMaskedLM''': ['''BertModelTester'''],
'''BertForMultipleChoice''': ['''BertModelTester'''],
'''BertForNextSentencePrediction''': ['''BertModelTester'''],
'''BertForPreTraining''': ['''BertModelTester'''],
'''BertForQuestionAnswering''': ['''BertModelTester'''],
'''BertForSequenceClassification''': ['''BertModelTester'''],
'''BertForTokenClassification''': ['''BertModelTester'''],
'''BertLMHeadModel''': ['''BertModelTester'''],
'''BertModel''': ['''BertModelTester'''],
}
_a = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''],
'''BlipModel''': ['''BlipModelTester'''],
'''BlipTextModel''': ['''BlipTextModelTester'''],
'''BlipVisionModel''': ['''BlipVisionModelTester'''],
}
self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase ) | 320 | 1 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class _lowercase ( unittest.TestCase , __UpperCAmelCase ):
def _UpperCamelCase ( self ) -> Optional[int]:
lowerCamelCase : Union[str, Any] = load_tool('text-classification' )
self.tool.setup()
lowerCamelCase : Tuple = load_tool('text-classification' , remote=UpperCAmelCase_ )
def _UpperCamelCase ( self ) -> Dict:
lowerCamelCase : Tuple = self.tool('That\'s quite cool' , ['positive', 'negative'] )
self.assertEqual(UpperCAmelCase_ , 'positive' )
def _UpperCamelCase ( self ) -> List[str]:
lowerCamelCase : List[Any] = self.remote_tool('That\'s quite cool' , ['positive', 'negative'] )
self.assertEqual(UpperCAmelCase_ , 'positive' )
def _UpperCamelCase ( self ) -> Dict:
lowerCamelCase : List[str] = self.tool(text='That\'s quite cool' , labels=['positive', 'negative'] )
self.assertEqual(UpperCAmelCase_ , 'positive' )
def _UpperCamelCase ( self ) -> int:
lowerCamelCase : List[str] = self.remote_tool(text='That\'s quite cool' , labels=['positive', 'negative'] )
self.assertEqual(UpperCAmelCase_ , 'positive' )
| 351 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json',
'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json',
'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class _lowercase ( __UpperCAmelCase ):
lowercase_ = 'big_bird'
def __init__( self , UpperCAmelCase_=50358 , UpperCAmelCase_=768 , UpperCAmelCase_=12 , UpperCAmelCase_=12 , UpperCAmelCase_=3072 , UpperCAmelCase_="gelu_new" , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.1 , UpperCAmelCase_=4096 , UpperCAmelCase_=2 , UpperCAmelCase_=0.02 , UpperCAmelCase_=1E-1_2 , UpperCAmelCase_=True , UpperCAmelCase_=0 , UpperCAmelCase_=1 , UpperCAmelCase_=2 , UpperCAmelCase_=66 , UpperCAmelCase_="block_sparse" , UpperCAmelCase_=True , UpperCAmelCase_=False , UpperCAmelCase_=64 , UpperCAmelCase_=3 , UpperCAmelCase_=None , **UpperCAmelCase_ , ) -> Union[str, Any]:
super().__init__(
pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , sep_token_id=UpperCAmelCase_ , **UpperCAmelCase_ , )
lowerCamelCase : Tuple = vocab_size
lowerCamelCase : str = max_position_embeddings
lowerCamelCase : str = hidden_size
lowerCamelCase : Optional[int] = num_hidden_layers
lowerCamelCase : List[str] = num_attention_heads
lowerCamelCase : List[str] = intermediate_size
lowerCamelCase : Optional[int] = hidden_act
lowerCamelCase : Optional[int] = hidden_dropout_prob
lowerCamelCase : Dict = attention_probs_dropout_prob
lowerCamelCase : int = initializer_range
lowerCamelCase : Union[str, Any] = type_vocab_size
lowerCamelCase : Union[str, Any] = layer_norm_eps
lowerCamelCase : int = use_cache
lowerCamelCase : Tuple = rescale_embeddings
lowerCamelCase : Dict = attention_type
lowerCamelCase : Optional[int] = use_bias
lowerCamelCase : Optional[int] = block_size
lowerCamelCase : Tuple = num_random_blocks
lowerCamelCase : List[Any] = classifier_dropout
class _lowercase ( __UpperCAmelCase ):
@property
def _UpperCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
lowerCamelCase : Tuple = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCamelCase : int = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 205 | 0 |
def _UpperCAmelCase (UpperCamelCase__ : int ):
if num < 0:
return False
_A : int = num
_A : int = 0
while num > 0:
_A : Optional[Any] = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 11 |
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
lowercase_ = None
lowercase_ = """<""" if sys.byteorder == """little""" else """>"""
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
lowercase_ = [
np.dtype("""|b1"""),
np.dtype("""|u1"""),
np.dtype("""<u2"""),
np.dtype(""">u2"""),
np.dtype("""<i2"""),
np.dtype(""">i2"""),
np.dtype("""<u4"""),
np.dtype(""">u4"""),
np.dtype("""<i4"""),
np.dtype(""">i4"""),
np.dtype("""<f4"""),
np.dtype(""">f4"""),
np.dtype("""<f8"""),
np.dtype(""">f8"""),
]
@dataclass
class a_ :
'''simple docstring'''
UpperCamelCase = True
UpperCamelCase = None
# Automatically constructed
UpperCamelCase = "PIL.Image.Image"
UpperCamelCase = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} )
UpperCamelCase = field(default='''Image''' , init=snake_case_ , repr=snake_case_ )
def __call__( self ) -> Tuple:
return self.pa_type
def snake_case_( self , A ) -> dict:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
if isinstance(A , A ):
_SCREAMING_SNAKE_CASE = np.array(A )
if isinstance(A , A ):
return {"path": value, "bytes": None}
elif isinstance(A , A ):
return {"path": None, "bytes": value}
elif isinstance(A , np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(A )
elif isinstance(A , PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(A )
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
f'An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )
def snake_case_( self , A , A=None ) -> "PIL.Image.Image":
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Image(decode=True) instead.""" )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support decoding images, please install 'Pillow'.""" )
if token_per_repo_id is None:
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = value["""path"""], value["""bytes"""]
if bytes_ is None:
if path is None:
raise ValueError(f'An image should have one of \'path\' or \'bytes\' but both are None in {value}.' )
else:
if is_local_path(A ):
_SCREAMING_SNAKE_CASE = PIL.Image.open(A )
else:
_SCREAMING_SNAKE_CASE = path.split("""::""" )[-1]
try:
_SCREAMING_SNAKE_CASE = string_to_dict(A , config.HUB_DATASETS_URL )["""repo_id"""]
_SCREAMING_SNAKE_CASE = token_per_repo_id.get(A )
except ValueError:
_SCREAMING_SNAKE_CASE = None
with xopen(A , """rb""" , use_auth_token=A ) as f:
_SCREAMING_SNAKE_CASE = BytesIO(f.read() )
_SCREAMING_SNAKE_CASE = PIL.Image.open(bytes_ )
else:
_SCREAMING_SNAKE_CASE = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def snake_case_( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
)
def snake_case_( self , A ) -> pa.StructArray:
if pa.types.is_string(storage.type ):
_SCREAMING_SNAKE_CASE = pa.array([None] * len(A ) , type=pa.binary() )
_SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
_SCREAMING_SNAKE_CASE = pa.array([None] * len(A ) , type=pa.string() )
_SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("""bytes""" ) >= 0:
_SCREAMING_SNAKE_CASE = storage.field("""bytes""" )
else:
_SCREAMING_SNAKE_CASE = pa.array([None] * len(A ) , type=pa.binary() )
if storage.type.get_field_index("""path""" ) >= 0:
_SCREAMING_SNAKE_CASE = storage.field("""path""" )
else:
_SCREAMING_SNAKE_CASE = pa.array([None] * len(A ) , type=pa.string() )
_SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
_SCREAMING_SNAKE_CASE = pa.array(
[encode_np_array(np.array(A ) )["""bytes"""] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
_SCREAMING_SNAKE_CASE = pa.array([None] * len(A ) , type=pa.string() )
_SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays(
[bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(A , self.pa_type )
def snake_case_( self , A ) -> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(A ):
with xopen(A , """rb""" ) as f:
_SCREAMING_SNAKE_CASE = f.read()
return bytes_
_SCREAMING_SNAKE_CASE = pa.array(
[
(path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
_SCREAMING_SNAKE_CASE = pa.array(
[os.path.basename(A ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
_SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(A , self.pa_type )
def lowerCamelCase ( ) ->List[str]:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
_SCREAMING_SNAKE_CASE = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def lowerCamelCase ( __lowerCamelCase : "PIL.Image.Image" ) ->bytes:
_SCREAMING_SNAKE_CASE = BytesIO()
if image.format in list_image_compression_formats():
_SCREAMING_SNAKE_CASE = image.format
else:
_SCREAMING_SNAKE_CASE = """PNG""" if image.mode in ["""1""", """L""", """LA""", """RGB""", """RGBA"""] else """TIFF"""
image.save(__lowerCamelCase , format=__lowerCamelCase )
return buffer.getvalue()
def lowerCamelCase ( __lowerCamelCase : "PIL.Image.Image" ) ->dict:
if hasattr(__lowerCamelCase , """filename""" ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(__lowerCamelCase )}
def lowerCamelCase ( __lowerCamelCase : np.ndarray ) ->dict:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
_SCREAMING_SNAKE_CASE = array.dtype
_SCREAMING_SNAKE_CASE = dtype.byteorder if dtype.byteorder != """=""" else _NATIVE_BYTEORDER
_SCREAMING_SNAKE_CASE = dtype.kind
_SCREAMING_SNAKE_CASE = dtype.itemsize
_SCREAMING_SNAKE_CASE = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
_SCREAMING_SNAKE_CASE = np.dtype("""|u1""" )
if dtype_kind not in ["u", "i"]:
raise TypeError(
F'Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.' )
if dtype is not dest_dtype:
warnings.warn(F'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
_SCREAMING_SNAKE_CASE = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
_SCREAMING_SNAKE_CASE = dtype_byteorder + dtype_kind + str(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = np.dtype(__lowerCamelCase )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(F'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
F'Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}' )
_SCREAMING_SNAKE_CASE = PIL.Image.fromarray(array.astype(__lowerCamelCase ) )
return {"path": None, "bytes": image_to_bytes(__lowerCamelCase )}
def lowerCamelCase ( __lowerCamelCase : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ) ->List[dict]:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
if objs:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = first_non_null_value(__lowerCamelCase )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(__lowerCamelCase , np.ndarray ):
_SCREAMING_SNAKE_CASE = no_op_if_value_is_null(__lowerCamelCase )
return [obj_to_image_dict_func(__lowerCamelCase ) for obj in objs]
elif isinstance(__lowerCamelCase , PIL.Image.Image ):
_SCREAMING_SNAKE_CASE = no_op_if_value_is_null(__lowerCamelCase )
return [obj_to_image_dict_func(__lowerCamelCase ) for obj in objs]
else:
return objs
else:
return objs
| 58 | 0 |
def __magic_name__ ( __a : list , __a : int = 0 ):
'''simple docstring'''
UpperCamelCase__ = length or len(_snake_case )
UpperCamelCase__ = False
for i in range(length - 1 ):
if list_data[i] > list_data[i + 1]:
UpperCamelCase__ = list_data[i + 1], list_data[i]
UpperCamelCase__ = True
return list_data if not swapped else bubble_sort(_snake_case , length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 352 |
import collections
import os
import re
from pathlib import Path
lowerCamelCase_ = '''src/transformers'''
# Matches is_xxx_available()
lowerCamelCase_ = re.compile(r'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
lowerCamelCase_ = re.compile(r'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowerCamelCase_ = re.compile(r'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
lowerCamelCase_ = re.compile(r'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
lowerCamelCase_ = re.compile(r'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowerCamelCase_ = re.compile(r'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
lowerCamelCase_ = re.compile(r'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
lowerCamelCase_ = re.compile(r'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
lowerCamelCase_ = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
lowerCamelCase_ = re.compile(r'''^\s*try:''')
# Catches a line with else:
lowerCamelCase_ = re.compile(r'''^\s*else:''')
def __magic_name__ ( __a : List[Any] ):
'''simple docstring'''
if _re_test_backend.search(__a ) is None:
return None
UpperCamelCase__ = [b[0] for b in _re_backend.findall(__a )]
backends.sort()
return "_and_".join(__a )
def __magic_name__ ( __a : Dict ):
'''simple docstring'''
with open(__a , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
UpperCamelCase__ = f.readlines()
UpperCamelCase__ = 0
while line_index < len(__a ) and not lines[line_index].startswith("""_import_structure = {""" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(__a ):
return None
# First grab the objects without a specific backend in _import_structure
UpperCamelCase__ = []
while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None:
UpperCamelCase__ = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(__a ):
UpperCamelCase__ = _re_one_line_import_struct.search(__a ).groups()[0]
UpperCamelCase__ = re.findall(R"""\[([^\]]+)\]""" , __a )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(""", """ )] )
line_index += 1
continue
UpperCamelCase__ = _re_import_struct_key_value.search(__a )
if single_line_import_search is not None:
UpperCamelCase__ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(__a ) > 0]
objects.extend(__a )
elif line.startswith(""" """ * 8 + """\"""" ):
objects.append(line[9:-3] )
line_index += 1
UpperCamelCase__ = {"""none""": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("""if TYPE_CHECKING""" ):
# If the line is an if not is_backend_available, we grab all objects associated.
UpperCamelCase__ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
UpperCamelCase__ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
UpperCamelCase__ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ):
UpperCamelCase__ = lines[line_index]
if _re_import_struct_add_one.search(__a ) is not None:
objects.append(_re_import_struct_add_one.search(__a ).groups()[0] )
elif _re_import_struct_add_many.search(__a ) is not None:
UpperCamelCase__ = _re_import_struct_add_many.search(__a ).groups()[0].split(""", """ )
UpperCamelCase__ = [obj[1:-1] for obj in imports if len(__a ) > 0]
objects.extend(__a )
elif _re_between_brackets.search(__a ) is not None:
UpperCamelCase__ = _re_between_brackets.search(__a ).groups()[0].split(""", """ )
UpperCamelCase__ = [obj[1:-1] for obj in imports if len(__a ) > 0]
objects.extend(__a )
elif _re_quote_object.search(__a ) is not None:
objects.append(_re_quote_object.search(__a ).groups()[0] )
elif line.startswith(""" """ * 8 + """\"""" ):
objects.append(line[9:-3] )
elif line.startswith(""" """ * 12 + """\"""" ):
objects.append(line[13:-3] )
line_index += 1
UpperCamelCase__ = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
UpperCamelCase__ = []
while (
line_index < len(__a )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("""else""" )
):
UpperCamelCase__ = lines[line_index]
UpperCamelCase__ = _re_import.search(__a )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 8 ):
objects.append(line[8:-2] )
line_index += 1
UpperCamelCase__ = {"""none""": objects}
# Let's continue with backend-specific objects
while line_index < len(__a ):
# If the line is an if is_backend_available, we grab all objects associated.
UpperCamelCase__ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
UpperCamelCase__ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
UpperCamelCase__ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ):
UpperCamelCase__ = lines[line_index]
UpperCamelCase__ = _re_import.search(__a )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 12 ):
objects.append(line[12:-2] )
line_index += 1
UpperCamelCase__ = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def __magic_name__ ( __a : Dict , __a : Dict ):
'''simple docstring'''
def find_duplicates(__a : Optional[Any] ):
return [k for k, v in collections.Counter(__a ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
UpperCamelCase__ = []
for key in import_dict_objects.keys():
UpperCamelCase__ = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}" )
UpperCamelCase__ = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
UpperCamelCase__ = """base imports""" if key == """none""" else f"{key} backend"
errors.append(f"Differences for {name}:" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(f" {a} in TYPE_HINT but not in _import_structure." )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(f" {a} in _import_structure but not in TYPE_HINT." )
return errors
def __magic_name__ ( ):
'''simple docstring'''
UpperCamelCase__ = []
for root, _, files in os.walk(__a ):
if "__init__.py" in files:
UpperCamelCase__ = os.path.join(__a , """__init__.py""" )
UpperCamelCase__ = parse_init(__a )
if objects is not None:
UpperCamelCase__ = analyze_results(*__a )
if len(__a ) > 0:
UpperCamelCase__ = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
failures.append("""\n""".join(__a ) )
if len(__a ) > 0:
raise ValueError("""\n\n""".join(__a ) )
def __magic_name__ ( ):
'''simple docstring'''
UpperCamelCase__ = []
for path, directories, files in os.walk(__a ):
for folder in directories:
# Ignore private modules
if folder.startswith("""_""" ):
directories.remove(__a )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(__a ) / folder).glob("""*.py""" ) ) ) == 0:
continue
UpperCamelCase__ = str((Path(__a ) / folder).relative_to(__a ) )
UpperCamelCase__ = short_path.replace(os.path.sep , """.""" )
submodules.append(__a )
for fname in files:
if fname == "__init__.py":
continue
UpperCamelCase__ = str((Path(__a ) / fname).relative_to(__a ) )
UpperCamelCase__ = short_path.replace(""".py""" , """""" ).replace(os.path.sep , """.""" )
if len(submodule.split(""".""" ) ) == 1:
submodules.append(__a )
return submodules
lowerCamelCase_ = [
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
'''models.esm.openfold_utils''',
]
def __magic_name__ ( ):
'''simple docstring'''
from transformers.utils import direct_transformers_import
UpperCamelCase__ = direct_transformers_import(__a )
UpperCamelCase__ = set(transformers._import_structure.keys() )
# This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
# some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
# (potentiall re-) add them.
with open(os.path.join(__a , """__init__.py""" ) , """r""" ) as f:
UpperCamelCase__ = f.read()
import_structure_keys.update(set(re.findall(R"""import_structure\[\"([^\"]*)\"\]""" , __a ) ) )
UpperCamelCase__ = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(__a ) > 0:
UpperCamelCase__ = """\n""".join(f"- {module}" for module in module_not_registered )
raise ValueError(
"""The following submodules are not properly registed in the main init of Transformers:\n"""
f"{list_of_modules}\n"
"""Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 178 | 0 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def a ( lowerCamelCase_ ):
'''simple docstring'''
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0X4E_00 and cp <= 0X9F_FF)
or (cp >= 0X34_00 and cp <= 0X4D_BF) #
or (cp >= 0X2_00_00 and cp <= 0X2_A6_DF) #
or (cp >= 0X2_A7_00 and cp <= 0X2_B7_3F) #
or (cp >= 0X2_B7_40 and cp <= 0X2_B8_1F) #
or (cp >= 0X2_B8_20 and cp <= 0X2_CE_AF) #
or (cp >= 0XF9_00 and cp <= 0XFA_FF)
or (cp >= 0X2_F8_00 and cp <= 0X2_FA_1F) #
): #
return True
return False
def a ( lowerCamelCase_ ):
'''simple docstring'''
# word like '180' or '身高' or '神'
for char in word:
lowercase__ = ord(lowerCamelCase_ )
if not _is_chinese_char(lowerCamelCase_ ):
return 0
return 1
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = set()
for token in tokens:
lowercase__ = len(lowerCamelCase_ ) > 1 and is_chinese(lowerCamelCase_ )
if chinese_word:
word_set.add(lowerCamelCase_ )
lowercase__ = list(lowerCamelCase_ )
return word_list
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
if not chinese_word_set:
return bert_tokens
lowercase__ = max([len(lowerCamelCase_ ) for w in chinese_word_set] )
lowercase__ = bert_tokens
lowercase__ , lowercase__ = 0, len(lowerCamelCase_ )
while start < end:
lowercase__ = True
if is_chinese(bert_word[start] ):
lowercase__ = min(end - start , lowerCamelCase_ )
for i in range(lowerCamelCase_ , 1 , -1 ):
lowercase__ = ''''''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
lowercase__ = '''##''' + bert_word[j]
lowercase__ = start + i
lowercase__ = False
break
if single_word:
start += 1
return bert_word
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = []
for i in range(0 , len(lowerCamelCase_ ) , 100 ):
lowercase__ = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=['''cws'''] ).cws
lowercase__ = [get_chinese_word(lowerCamelCase_ ) for r in res]
ltp_res.extend(lowerCamelCase_ )
assert len(lowerCamelCase_ ) == len(lowerCamelCase_ )
lowercase__ = []
for i in range(0 , len(lowerCamelCase_ ) , 100 ):
lowercase__ = bert_tokenizer(lines[i : i + 100] , add_special_tokens=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=512 )
bert_res.extend(res['''input_ids'''] )
assert len(lowerCamelCase_ ) == len(lowerCamelCase_ )
lowercase__ = []
for input_ids, chinese_word in zip(lowerCamelCase_ , lowerCamelCase_ ):
lowercase__ = []
for id in input_ids:
lowercase__ = bert_tokenizer._convert_id_to_token(lowerCamelCase_ )
input_tokens.append(lowerCamelCase_ )
lowercase__ = add_sub_symbol(lowerCamelCase_ , lowerCamelCase_ )
lowercase__ = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(lowerCamelCase_ ):
if token[:2] == "##":
lowercase__ = token[2:]
# save chinese tokens' pos
if len(lowerCamelCase_ ) == 1 and _is_chinese_char(ord(lowerCamelCase_ ) ):
ref_id.append(lowerCamelCase_ )
ref_ids.append(lowerCamelCase_ )
assert len(lowerCamelCase_ ) == len(lowerCamelCase_ )
return ref_ids
def a ( lowerCamelCase_ ):
'''simple docstring'''
# For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
# If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name , '''r''' , encoding='''utf-8''' ) as f:
lowercase__ = f.readlines()
lowercase__ = [line.strip() for line in data if len(lowerCamelCase_ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
lowercase__ = LTP(args.ltp ) # faster in GPU device
lowercase__ = BertTokenizer.from_pretrained(args.bert )
lowercase__ = prepare_ref(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
with open(args.save_path , '''w''' , encoding='''utf-8''' ) as f:
lowercase__ = [json.dumps(lowerCamelCase_ ) + '''\n''' for ref in ref_ids]
f.writelines(lowerCamelCase_ )
if __name__ == "__main__":
A__ : Any = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
required=False,
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp',
required=False,
type=str,
default='./resources/ltp',
help='resources for LTP tokenizer, usually a path',
)
parser.add_argument(
'--bert',
required=False,
type=str,
default='./resources/robert',
help='resources for Bert tokenizer',
)
parser.add_argument(
'--save_path',
required=False,
type=str,
default='./resources/ref.txt',
help='path to save res',
)
A__ : Dict = parser.parse_args()
main(args)
| 207 |
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = []
lowercase__ = []
lowercase__ = []
for rt in rc.restypes:
lowercase__ = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
lowercase__ = {name: i for i, name in enumerate(lowerCamelCase_ )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14 )
restype_atomaa_to_atomaa_list.append([0] * 37 )
restype_atomaa_mask_list.append([0.0] * 14 )
lowercase__ = torch.tensor(
lowerCamelCase_ , dtype=torch.intaa , device=protein['''aatype'''].device , )
lowercase__ = torch.tensor(
lowerCamelCase_ , dtype=torch.intaa , device=protein['''aatype'''].device , )
lowercase__ = torch.tensor(
lowerCamelCase_ , dtype=torch.floataa , device=protein['''aatype'''].device , )
lowercase__ = protein['''aatype'''].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
lowercase__ = restype_atomaa_to_atomaa[protein_aatype]
lowercase__ = restype_atomaa_mask[protein_aatype]
lowercase__ = residx_atomaa_mask
lowercase__ = residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
lowercase__ = restype_atomaa_to_atomaa[protein_aatype]
lowercase__ = residx_atomaa_to_atomaa.long()
# create the corresponding mask
lowercase__ = torch.zeros([21, 37] , dtype=torch.floataa , device=protein['''aatype'''].device )
for restype, restype_letter in enumerate(rc.restypes ):
lowercase__ = rc.restype_atoa[restype_letter]
lowercase__ = rc.residue_atoms[restype_name]
for atom_name in atom_names:
lowercase__ = rc.atom_order[atom_name]
lowercase__ = 1
lowercase__ = restype_atomaa_mask[protein_aatype]
lowercase__ = residx_atomaa_mask
return protein
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = tree_map(lambda lowerCamelCase_ : torch.tensor(lowerCamelCase_ , device=batch['''aatype'''].device ) , lowerCamelCase_ , np.ndarray )
lowercase__ = tensor_tree_map(lambda lowerCamelCase_ : np.array(lowerCamelCase_ ) , make_atomaa_masks(lowerCamelCase_ ) )
return out
| 207 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"""nielsr/canine-s""": 2048,
}
# Unicode defines 1,114,112 total “codepoints”
__A = 111_4112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
__A = 0
__A = 0XE0_00
__A = 0XE0_01
__A = 0XE0_02
__A = 0XE0_03
__A = 0XE0_04
# Maps special codepoints to human-readable names.
__A = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: """[CLS]""",
SEP: """[SEP]""",
BOS: """[BOS]""",
MASK: """[MASK]""",
PAD: """[PAD]""",
RESERVED: """[RESERVED]""",
}
# Maps special codepoint human-readable names to their codepoint values.
__A = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class _lowerCAmelCase ( a ):
"""simple docstring"""
__magic_name__ :Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , __UpperCAmelCase=chr(__UpperCAmelCase ) , __UpperCAmelCase=chr(__UpperCAmelCase ) , __UpperCAmelCase=chr(__UpperCAmelCase ) , __UpperCAmelCase=chr(__UpperCAmelCase ) , __UpperCAmelCase=chr(__UpperCAmelCase ) , __UpperCAmelCase=chr(__UpperCAmelCase ) , __UpperCAmelCase=False , __UpperCAmelCase=2_0_4_8 , **__UpperCAmelCase , ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else bos_token
lowerCAmelCase__ :Optional[Any] = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else eos_token
lowerCAmelCase__ :Dict = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else sep_token
lowerCAmelCase__ :int = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else cls_token
lowerCAmelCase__ :int = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCAmelCase__ :Tuple = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
super().__init__(
bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , model_max_length=__UpperCAmelCase , **__UpperCAmelCase , )
# Creates a mapping for looking up the IDs of special symbols.
lowerCAmelCase__ :Dict[str, int] = {}
for codepoint, name in SPECIAL_CODEPOINTS.items():
lowerCAmelCase__ :List[Any] = codepoint
# Creates a mapping for looking up the string forms of special symbol IDs.
lowerCAmelCase__ :Dict[int, str] = {
codepoint: name for name, codepoint in self._special_codepoints.items()
}
lowerCAmelCase__ :List[str] = UNICODE_VOCAB_SIZE
lowerCAmelCase__ :str = len(self._special_codepoints )
@property
def snake_case ( self ):
'''simple docstring'''
return self._unicode_vocab_size
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
return list(__UpperCAmelCase )
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
try:
return ord(__UpperCAmelCase )
except TypeError:
raise ValueError(F"invalid token: '{token}'" )
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
try:
if index in SPECIAL_CODEPOINTS:
return SPECIAL_CODEPOINTS[index]
return chr(__UpperCAmelCase )
except TypeError:
raise ValueError(F"invalid id: {index}" )
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
return "".join(__UpperCAmelCase )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
lowerCAmelCase__ :int = [self.sep_token_id]
lowerCAmelCase__ :Any = [self.cls_token_id]
lowerCAmelCase__ :List[str] = cls + token_ids_a + sep
if token_ids_a is not None:
result += token_ids_a + sep
return result
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
lowerCAmelCase__ :str = [1] + ([0] * len(__UpperCAmelCase )) + [1]
if token_ids_a is not None:
result += ([0] * len(__UpperCAmelCase )) + [1]
return result
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = [self.sep_token_id]
lowerCAmelCase__ :int = [self.cls_token_id]
lowerCAmelCase__ :Optional[Any] = len(cls + token_ids_a + sep ) * [0]
if token_ids_a is not None:
result += len(token_ids_a + sep ) * [1]
return result
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
return ()
| 366 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
def __A (_SCREAMING_SNAKE_CASE ) ->str:
"""simple docstring"""
lowerCAmelCase__ :Dict = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
lowerCAmelCase__ :Tuple = [144, 192, 240]
lowerCAmelCase__ :List[str] = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
lowerCAmelCase__ :List[str] = [96, 120, 144]
lowerCAmelCase__ :List[Any] = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
lowerCAmelCase__ :Union[str, Any] = [64, 80, 96]
lowerCAmelCase__ :int = [16, 16, 24, 48, 64, 80, 320]
lowerCAmelCase__ :Optional[Any] = 0.0_5
lowerCAmelCase__ :Tuple = 2.0
if mobilevit_name.startswith('deeplabv3_' ):
lowerCAmelCase__ :int = 512
lowerCAmelCase__ :Optional[Any] = 16
lowerCAmelCase__ :int = 21
lowerCAmelCase__ :Tuple = 'pascal-voc-id2label.json'
else:
lowerCAmelCase__ :int = 1000
lowerCAmelCase__ :Optional[Any] = 'imagenet-1k-id2label.json'
lowerCAmelCase__ :Optional[int] = 'huggingface/label-files'
lowerCAmelCase__ :List[str] = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
lowerCAmelCase__ :List[Any] = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
lowerCAmelCase__ :Dict = idalabel
lowerCAmelCase__ :List[Any] = {v: k for k, v in idalabel.items()}
return config
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) ->Optional[int]:
"""simple docstring"""
for i in range(1 , 6 ):
if F"layer_{i}." in name:
lowerCAmelCase__ :List[Any] = name.replace(F"layer_{i}." , F"encoder.layer.{i - 1}." )
if "conv_1." in name:
lowerCAmelCase__ :Dict = name.replace('conv_1.' , 'conv_stem.' )
if ".block." in name:
lowerCAmelCase__ :List[str] = name.replace('.block.' , '.' )
if "exp_1x1" in name:
lowerCAmelCase__ :str = name.replace('exp_1x1' , 'expand_1x1' )
if "red_1x1" in name:
lowerCAmelCase__ :Any = name.replace('red_1x1' , 'reduce_1x1' )
if ".local_rep.conv_3x3." in name:
lowerCAmelCase__ :List[Any] = name.replace('.local_rep.conv_3x3.' , '.conv_kxk.' )
if ".local_rep.conv_1x1." in name:
lowerCAmelCase__ :Any = name.replace('.local_rep.conv_1x1.' , '.conv_1x1.' )
if ".norm." in name:
lowerCAmelCase__ :Union[str, Any] = name.replace('.norm.' , '.normalization.' )
if ".conv." in name:
lowerCAmelCase__ :List[str] = name.replace('.conv.' , '.convolution.' )
if ".conv_proj." in name:
lowerCAmelCase__ :List[str] = name.replace('.conv_proj.' , '.conv_projection.' )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if F".{i}.{j}." in name:
lowerCAmelCase__ :Optional[int] = name.replace(F".{i}.{j}." , F".{i}.layer.{j}." )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if F".{i}.{j}." in name:
lowerCAmelCase__ :Dict = name.replace(F".{i}.{j}." , F".{i}." )
if "expand_1x1" in name:
lowerCAmelCase__ :Dict = name.replace('expand_1x1' , 'downsampling_layer.expand_1x1' )
if "conv_3x3" in name:
lowerCAmelCase__ :Optional[int] = name.replace('conv_3x3' , 'downsampling_layer.conv_3x3' )
if "reduce_1x1" in name:
lowerCAmelCase__ :Optional[Any] = name.replace('reduce_1x1' , 'downsampling_layer.reduce_1x1' )
for i in range(2 , 5 ):
if F".global_rep.{i}.weight" in name:
lowerCAmelCase__ :Tuple = name.replace(F".global_rep.{i}.weight" , '.layernorm.weight' )
if F".global_rep.{i}.bias" in name:
lowerCAmelCase__ :Any = name.replace(F".global_rep.{i}.bias" , '.layernorm.bias' )
if ".global_rep." in name:
lowerCAmelCase__ :List[str] = name.replace('.global_rep.' , '.transformer.' )
if ".pre_norm_mha.0." in name:
lowerCAmelCase__ :int = name.replace('.pre_norm_mha.0.' , '.layernorm_before.' )
if ".pre_norm_mha.1.out_proj." in name:
lowerCAmelCase__ :Any = name.replace('.pre_norm_mha.1.out_proj.' , '.attention.output.dense.' )
if ".pre_norm_ffn.0." in name:
lowerCAmelCase__ :Optional[Any] = name.replace('.pre_norm_ffn.0.' , '.layernorm_after.' )
if ".pre_norm_ffn.1." in name:
lowerCAmelCase__ :Optional[Any] = name.replace('.pre_norm_ffn.1.' , '.intermediate.dense.' )
if ".pre_norm_ffn.4." in name:
lowerCAmelCase__ :Tuple = name.replace('.pre_norm_ffn.4.' , '.output.dense.' )
if ".transformer." in name:
lowerCAmelCase__ :Any = name.replace('.transformer.' , '.transformer.layer.' )
if ".aspp_layer." in name:
lowerCAmelCase__ :str = name.replace('.aspp_layer.' , '.' )
if ".aspp_pool." in name:
lowerCAmelCase__ :Optional[int] = name.replace('.aspp_pool.' , '.' )
if "seg_head." in name:
lowerCAmelCase__ :Optional[int] = name.replace('seg_head.' , 'segmentation_head.' )
if "segmentation_head.classifier.classifier." in name:
lowerCAmelCase__ :int = name.replace('segmentation_head.classifier.classifier.' , 'segmentation_head.classifier.' )
if "classifier.fc." in name:
lowerCAmelCase__ :Union[str, Any] = name.replace('classifier.fc.' , 'classifier.' )
elif (not base_model) and ("segmentation_head." not in name):
lowerCAmelCase__ :Optional[Any] = 'mobilevit.' + name
return name
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) ->Union[str, Any]:
"""simple docstring"""
if base_model:
lowerCAmelCase__ :Union[str, Any] = ''
else:
lowerCAmelCase__ :Tuple = 'mobilevit.'
for key in orig_state_dict.copy().keys():
lowerCAmelCase__ :Union[str, Any] = orig_state_dict.pop(_SCREAMING_SNAKE_CASE )
if key[:8] == "encoder.":
lowerCAmelCase__ :Optional[int] = key[8:]
if "qkv" in key:
lowerCAmelCase__ :Tuple = key.split('.' )
lowerCAmelCase__ :List[Any] = int(key_split[0][6:] ) - 1
lowerCAmelCase__ :Any = int(key_split[3] )
lowerCAmelCase__ :Optional[int] = model.get_submodule(F"{model_prefix}encoder.layer.{layer_num}" )
lowerCAmelCase__ :Tuple = layer.transformer.layer[transformer_num].attention.attention.all_head_size
lowerCAmelCase__ :List[Any] = (
F"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
)
if "weight" in key:
lowerCAmelCase__ :str = val[:dim, :]
lowerCAmelCase__ :int = val[dim : dim * 2, :]
lowerCAmelCase__ :Optional[int] = val[-dim:, :]
else:
lowerCAmelCase__ :Union[str, Any] = val[:dim]
lowerCAmelCase__ :Any = val[dim : dim * 2]
lowerCAmelCase__ :Tuple = val[-dim:]
else:
lowerCAmelCase__ :List[str] = val
return orig_state_dict
def __A () ->str:
"""simple docstring"""
lowerCAmelCase__ :Any = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowerCAmelCase__ :Tuple = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) ->List[Any]:
"""simple docstring"""
lowerCAmelCase__ :int = get_mobilevit_config(_SCREAMING_SNAKE_CASE )
# load original state_dict
lowerCAmelCase__ :Union[str, Any] = torch.load(_SCREAMING_SNAKE_CASE , map_location='cpu' )
# load 🤗 model
if mobilevit_name.startswith('deeplabv3_' ):
lowerCAmelCase__ :Optional[int] = MobileViTForSemanticSegmentation(_SCREAMING_SNAKE_CASE ).eval()
else:
lowerCAmelCase__ :Optional[int] = MobileViTForImageClassification(_SCREAMING_SNAKE_CASE ).eval()
lowerCAmelCase__ :Tuple = convert_state_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
model.load_state_dict(_SCREAMING_SNAKE_CASE )
# Check outputs on an image, prepared by MobileViTImageProcessor
lowerCAmelCase__ :List[Any] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
lowerCAmelCase__ :Optional[int] = image_processor(images=prepare_img() , return_tensors='pt' )
lowerCAmelCase__ :List[Any] = model(**_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :Tuple = outputs.logits
if mobilevit_name.startswith('deeplabv3_' ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
lowerCAmelCase__ :Optional[int] = torch.tensor(
[
[[6.2_0_6_5, 6.1_2_9_2, 6.2_0_7_0], [6.1_0_7_9, 6.1_2_5_4, 6.1_7_4_7], [6.0_0_4_2, 6.1_0_7_1, 6.1_0_3_4]],
[[-6.9_2_5_3, -6.8_6_5_3, -7.0_3_9_8], [-7.3_2_1_8, -7.3_9_8_3, -7.3_6_7_0], [-7.1_9_6_1, -7.2_4_8_2, -7.1_5_6_9]],
[[-4.4_7_2_3, -4.4_3_4_8, -4.3_7_6_9], [-5.3_6_2_9, -5.4_6_3_2, -5.4_5_9_8], [-5.1_5_8_7, -5.3_4_0_2, -5.5_0_5_9]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
lowerCAmelCase__ :Dict = torch.tensor(
[
[[5.4_4_4_9, 5.5_7_3_3, 5.6_3_1_4], [5.1_8_1_5, 5.3_9_3_0, 5.5_9_6_3], [5.1_6_5_6, 5.4_3_3_3, 5.4_8_5_3]],
[[-9.4_4_2_3, -9.7_7_6_6, -9.6_7_1_4], [-9.1_5_8_1, -9.5_7_2_0, -9.5_5_1_9], [-9.1_0_0_6, -9.6_4_5_8, -9.5_7_0_3]],
[[-7.7_7_2_1, -7.3_7_1_6, -7.1_5_8_3], [-8.4_5_9_9, -8.0_6_2_4, -7.7_9_4_4], [-8.4_1_7_2, -7.8_3_6_6, -7.5_0_2_5]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
lowerCAmelCase__ :Tuple = torch.tensor(
[
[[6.9_8_1_1, 6.9_7_4_3, 7.3_1_2_3], [7.1_7_7_7, 7.1_9_3_1, 7.3_9_3_8], [7.5_6_3_3, 7.8_0_5_0, 7.8_9_0_1]],
[[-1_0.5_5_3_6, -1_0.2_3_3_2, -1_0.2_9_2_4], [-1_0.2_3_3_6, -9.8_6_2_4, -9.5_9_6_4], [-1_0.8_8_4_0, -1_0.8_1_5_8, -1_0.6_6_5_9]],
[[-3.4_9_3_8, -3.0_6_3_1, -2.8_6_2_0], [-3.4_2_0_5, -2.8_1_3_5, -2.6_8_7_5], [-3.4_1_7_9, -2.7_9_4_5, -2.8_7_5_0]],
] )
else:
raise ValueError(F"Unknown mobilevit_name: {mobilevit_name}" )
assert torch.allclose(logits[0, :3, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 )
else:
assert logits.shape == (1, 1000)
if mobilevit_name == "mobilevit_s":
lowerCAmelCase__ :Union[str, Any] = torch.tensor([-0.9_8_6_6, 0.2_3_9_2, -1.1_2_4_1] )
elif mobilevit_name == "mobilevit_xs":
lowerCAmelCase__ :Any = torch.tensor([-2.4_7_6_1, -0.9_3_9_9, -1.9_5_8_7] )
elif mobilevit_name == "mobilevit_xxs":
lowerCAmelCase__ :str = torch.tensor([-1.9_3_6_4, -1.2_3_2_7, -0.4_6_5_3] )
else:
raise ValueError(F"Unknown mobilevit_name: {mobilevit_name}" )
assert torch.allclose(logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
print(F"Saving model {mobilevit_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if push_to_hub:
lowerCAmelCase__ :Dict = {
'mobilevit_s': 'mobilevit-small',
'mobilevit_xs': 'mobilevit-x-small',
'mobilevit_xxs': 'mobilevit-xx-small',
'deeplabv3_mobilevit_s': 'deeplabv3-mobilevit-small',
'deeplabv3_mobilevit_xs': 'deeplabv3-mobilevit-x-small',
'deeplabv3_mobilevit_xxs': 'deeplabv3-mobilevit-xx-small',
}
print('Pushing to the hub...' )
lowerCAmelCase__ :str = model_mapping[mobilevit_name]
image_processor.push_to_hub(_SCREAMING_SNAKE_CASE , organization='apple' )
model.push_to_hub(_SCREAMING_SNAKE_CASE , organization='apple' )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--mobilevit_name""",
default="""mobilevit_s""",
type=str,
help=(
"""Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"""
""" 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."""
),
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__A = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 254 | 0 |
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def __lowerCamelCase ( ):
"""simple docstring"""
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
a :Optional[int] = '''__test_patch_submodule_mock__'''
with patch_submodule(_test_patching , '''os.path.join''' , UpperCAmelCase_ ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everthing is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def __lowerCamelCase ( ):
"""simple docstring"""
assert _test_patching.open is open
a :Dict = '''__test_patch_submodule_builtin_mock__'''
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching , '''open''' , UpperCAmelCase_ ):
assert _test_patching.open is mock
# check that everthing is back to normal when the patch is over
assert _test_patching.open is open
def __lowerCamelCase ( ):
"""simple docstring"""
a :Any = '''__test_patch_submodule_missing_mock__'''
with patch_submodule(_test_patching , '''pandas.read_csv''' , UpperCAmelCase_ ):
pass
def __lowerCamelCase ( ):
"""simple docstring"""
a :Any = '''__test_patch_submodule_missing_builtin_mock__'''
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching , '''len''' , UpperCAmelCase_ ) is None
with patch_submodule(_test_patching , '''len''' , UpperCAmelCase_ ):
assert _test_patching.len is mock
assert _test_patching.len is len
def __lowerCamelCase ( ):
"""simple docstring"""
a :Tuple = '''__test_patch_submodule_start_and_stop_mock__'''
a :Any = patch_submodule(_test_patching , '''open''' , UpperCAmelCase_ )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def __lowerCamelCase ( ):
"""simple docstring"""
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
a :Dict = '''__test_patch_submodule_successive_join__'''
a :List[str] = '''__test_patch_submodule_successive_dirname__'''
a :Any = '''__test_patch_submodule_successive_rename__'''
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , '''os.path.join''' , UpperCAmelCase_ ):
with patch_submodule(_test_patching , '''os.rename''' , UpperCAmelCase_ ):
with patch_submodule(_test_patching , '''os.path.dirname''' , UpperCAmelCase_ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , '''os.rename''' , UpperCAmelCase_ ):
with patch_submodule(_test_patching , '''os.path.join''' , UpperCAmelCase_ ):
with patch_submodule(_test_patching , '''os.path.dirname''' , UpperCAmelCase_ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def __lowerCamelCase ( ):
"""simple docstring"""
a :Tuple = '''__test_patch_submodule_doesnt_exist_mock__'''
with patch_submodule(_test_patching , '''__module_that_doesn_exist__.__attribute_that_doesn_exist__''' , UpperCAmelCase_ ):
pass
with patch_submodule(_test_patching , '''os.__attribute_that_doesn_exist__''' , UpperCAmelCase_ ):
pass
| 94 |
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def UpperCamelCase ( ) -> tuple[list[int], int]:
UpperCamelCase : int = [randint(-1000 , 1000 ) for i in range(10 )]
UpperCamelCase : Dict = randint(-5000 , 5000 )
return (arr, r)
__UpperCAmelCase = make_dataset()
def UpperCamelCase ( snake_case__ : list[int] , snake_case__ : int ) -> tuple[int, ...]:
for triplet in permutations(snake_case__ , 3 ):
if sum(snake_case__ ) == target:
return tuple(sorted(snake_case__ ) )
return (0, 0, 0)
def UpperCamelCase ( snake_case__ : list[int] , snake_case__ : int ) -> tuple[int, int, int]:
arr.sort()
UpperCamelCase : List[str] = len(snake_case__ )
for i in range(n - 1 ):
UpperCamelCase , UpperCamelCase : Optional[Any] = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
def UpperCamelCase ( ) -> tuple[float, float]:
UpperCamelCase : Any = '\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n'
UpperCamelCase : Optional[Any] = '\ntriplet_sum1(*dataset)\n'
UpperCamelCase : Dict = '\ntriplet_sum2(*dataset)\n'
UpperCamelCase : Optional[int] = repeat(setup=snake_case__ , stmt=snake_case__ , repeat=5 , number=10000 )
UpperCamelCase : Any = repeat(setup=snake_case__ , stmt=snake_case__ , repeat=5 , number=10000 )
return (min(snake_case__ ), min(snake_case__ ))
if __name__ == "__main__":
from doctest import testmod
testmod()
__UpperCAmelCase = solution_times()
print(F"""The time for naive implementation is {times[0]}.""")
print(F"""The time for optimized implementation is {times[1]}.""")
| 119 | 0 |
snake_case : Any = 0 # The first color of the flag.
snake_case : Optional[int] = 1 # The second color of the flag.
snake_case : str = 2 # The third color of the flag.
snake_case : Optional[int] = (red, white, blue)
def __lowercase ( __lowerCAmelCase : list ):
if not sequence:
return []
if len(__lowerCAmelCase ) == 1:
return list(__lowerCAmelCase )
a__ = 0
a__ = len(__lowerCAmelCase ) - 1
a__ = 0
while mid <= high:
if sequence[mid] == colors[0]:
a__ , a__ = sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
a__ , a__ = sequence[high], sequence[mid]
high -= 1
else:
a__ = F'The elements inside the sequence must contains only {colors} values'
raise ValueError(__lowerCAmelCase )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
snake_case : Union[str, Any] = input('''Enter numbers separated by commas:\n''').strip()
snake_case : str = [int(item.strip()) for item in user_input.split(''',''')]
print(f"""{dutch_national_flag_sort(unsorted)}""")
| 355 |
from collections import defaultdict
from math import ceil, sqrt
def __lowercase ( __lowerCAmelCase : int = 1_0_0_0_0_0_0 , __lowerCAmelCase : int = 1_0 ):
a__ = defaultdict(__lowerCAmelCase )
for outer_width in range(3 , (t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
a__ = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
else:
a__ = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(__lowerCAmelCase , outer_width - 1 , 2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 1_0 )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 109 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __snake_case ( unittest.TestCase ):
def __a ( self ) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __a ( self ) -> List[str]:
'''simple docstring'''
snake_case__ : Union[str, Any] = 1
snake_case__ : Union[str, Any] = 3
snake_case__ : Any = (32, 32)
snake_case__ : Any = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__UpperCamelCase )
return image
@property
def __a ( self ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case__ : Any = UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__UpperCamelCase , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
return model
@property
def __a ( self ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case__ : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def __a ( self ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case__ : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
return CLIPTextModel(__UpperCamelCase )
def __a ( self ) -> str:
'''simple docstring'''
snake_case__ : str = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case__ : Tuple = self.dummy_cond_unet_upscale
snake_case__ : Tuple = DDPMScheduler()
snake_case__ : Optional[int] = DDIMScheduler(prediction_type='v_prediction' )
snake_case__ : Any = self.dummy_vae
snake_case__ : List[str] = self.dummy_text_encoder
snake_case__ : List[str] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
snake_case__ : str = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case__ : str = Image.fromarray(np.uinta(__UpperCamelCase ) ).convert('RGB' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
snake_case__ : Optional[int] = StableDiffusionUpscalePipeline(
unet=__UpperCamelCase , low_res_scheduler=__UpperCamelCase , scheduler=__UpperCamelCase , vae=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , max_noise_level=350 , )
snake_case__ : str = sd_pipe.to(__UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
snake_case__ : List[str] = 'A painting of a squirrel eating a burger'
snake_case__ : Optional[Any] = torch.Generator(device=__UpperCamelCase ).manual_seed(0 )
snake_case__ : Union[str, Any] = sd_pipe(
[prompt] , image=__UpperCamelCase , generator=__UpperCamelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
snake_case__ : Union[str, Any] = output.images
snake_case__ : List[Any] = torch.Generator(device=__UpperCamelCase ).manual_seed(0 )
snake_case__ : List[Any] = sd_pipe(
[prompt] , image=__UpperCamelCase , generator=__UpperCamelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , return_dict=__UpperCamelCase , )[0]
snake_case__ : Any = image[0, -3:, -3:, -1]
snake_case__ : Tuple = image_from_tuple[0, -3:, -3:, -1]
snake_case__ : Any = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
snake_case__ : str = np.array([0.3_1_1_3, 0.3_9_1_0, 0.4_2_7_2, 0.4_8_5_9, 0.5_0_6_1, 0.4_6_5_2, 0.5_3_6_2, 0.5_7_1_5, 0.5_6_6_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __a ( self ) -> Optional[int]:
'''simple docstring'''
snake_case__ : str = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case__ : str = self.dummy_cond_unet_upscale
snake_case__ : List[str] = DDPMScheduler()
snake_case__ : Optional[int] = DDIMScheduler(prediction_type='v_prediction' )
snake_case__ : List[Any] = self.dummy_vae
snake_case__ : Optional[Any] = self.dummy_text_encoder
snake_case__ : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
snake_case__ : List[Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case__ : Optional[Any] = Image.fromarray(np.uinta(__UpperCamelCase ) ).convert('RGB' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
snake_case__ : int = StableDiffusionUpscalePipeline(
unet=__UpperCamelCase , low_res_scheduler=__UpperCamelCase , scheduler=__UpperCamelCase , vae=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , max_noise_level=350 , )
snake_case__ : Optional[int] = sd_pipe.to(__UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
snake_case__ : List[Any] = 'A painting of a squirrel eating a burger'
snake_case__ : List[Any] = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
snake_case__ : Tuple = output.images
assert image.shape[0] == 2
snake_case__ : int = torch.Generator(device=__UpperCamelCase ).manual_seed(0 )
snake_case__ : List[Any] = sd_pipe(
[prompt] , image=__UpperCamelCase , generator=__UpperCamelCase , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
snake_case__ : List[Any] = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def __a ( self ) -> Tuple:
'''simple docstring'''
snake_case__ : str = self.dummy_cond_unet_upscale
snake_case__ : str = DDPMScheduler()
snake_case__ : List[Any] = DDIMScheduler(prediction_type='v_prediction' )
snake_case__ : Union[str, Any] = self.dummy_vae
snake_case__ : Tuple = self.dummy_text_encoder
snake_case__ : Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
snake_case__ : Dict = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case__ : Optional[int] = Image.fromarray(np.uinta(__UpperCamelCase ) ).convert('RGB' ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
snake_case__ : List[Any] = unet.half()
snake_case__ : Tuple = text_encoder.half()
# make sure here that pndm scheduler skips prk
snake_case__ : List[str] = StableDiffusionUpscalePipeline(
unet=__UpperCamelCase , low_res_scheduler=__UpperCamelCase , scheduler=__UpperCamelCase , vae=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , max_noise_level=350 , )
snake_case__ : Optional[Any] = sd_pipe.to(__UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
snake_case__ : str = 'A painting of a squirrel eating a burger'
snake_case__ : Dict = torch.manual_seed(0 )
snake_case__ : int = sd_pipe(
[prompt] , image=__UpperCamelCase , generator=__UpperCamelCase , num_inference_steps=2 , output_type='np' , ).images
snake_case__ : int = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
def __a ( self ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self ) -> int:
'''simple docstring'''
snake_case__ : Dict = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
snake_case__ : Optional[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
'/upsampled_cat.npy' )
snake_case__ : int = 'stabilityai/stable-diffusion-x4-upscaler'
snake_case__ : List[Any] = StableDiffusionUpscalePipeline.from_pretrained(__UpperCamelCase )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing()
snake_case__ : Tuple = 'a cat sitting on a park bench'
snake_case__ : Union[str, Any] = torch.manual_seed(0 )
snake_case__ : str = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , generator=__UpperCamelCase , output_type='np' , )
snake_case__ : str = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1E-3
def __a ( self ) -> Optional[int]:
'''simple docstring'''
snake_case__ : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
snake_case__ : int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
'/upsampled_cat_fp16.npy' )
snake_case__ : Dict = 'stabilityai/stable-diffusion-x4-upscaler'
snake_case__ : str = StableDiffusionUpscalePipeline.from_pretrained(
__UpperCamelCase , torch_dtype=torch.floataa , )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing()
snake_case__ : str = 'a cat sitting on a park bench'
snake_case__ : int = torch.manual_seed(0 )
snake_case__ : Optional[Any] = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , generator=__UpperCamelCase , output_type='np' , )
snake_case__ : int = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def __a ( self ) -> Tuple:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
snake_case__ : Any = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
snake_case__ : Dict = 'stabilityai/stable-diffusion-x4-upscaler'
snake_case__ : Tuple = StableDiffusionUpscalePipeline.from_pretrained(
__UpperCamelCase , torch_dtype=torch.floataa , )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
snake_case__ : Optional[int] = 'a cat sitting on a park bench'
snake_case__ : Dict = torch.manual_seed(0 )
snake_case__ : str = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , generator=__UpperCamelCase , num_inference_steps=5 , output_type='np' , )
snake_case__ : str = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
| 143 | import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
A : Tuple = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
A : Dict = "main"
# Default branch name
A : List[str] = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
A : Tuple = "aaaaaaa"
# This commit does not exist, so we should 404.
A : int = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
A : Tuple = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
@contextlib.contextmanager
def a__ ( ):
print("Welcome!" )
yield
print("Bye!" )
@contextlib.contextmanager
def a__ ( ):
print("Bonjour!" )
yield
print("Au revoir!" )
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
def __A ( self : Union[str, Any] ) -> Any:
# If the spec is missing, importlib would not be able to import the module dynamically.
assert transformers.__spec__ is not None
assert importlib.util.find_spec("transformers" ) is not None
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
@unittest.mock.patch("sys.stdout" , new_callable=io.StringIO )
def __A ( self : Tuple , __magic_name__ : Union[str, Any] ) -> Union[str, Any]:
with ContextManagers([] ):
print("Transformers are awesome!" )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , "Transformers are awesome!\n" )
@unittest.mock.patch("sys.stdout" , new_callable=io.StringIO )
def __A ( self : Dict , __magic_name__ : Union[str, Any] ) -> int:
with ContextManagers([context_en()] ):
print("Transformers are awesome!" )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , "Welcome!\nTransformers are awesome!\nBye!\n" )
@unittest.mock.patch("sys.stdout" , new_callable=io.StringIO )
def __A ( self : Tuple , __magic_name__ : str ) -> Union[str, Any]:
with ContextManagers([context_fr(), context_en()] ):
print("Transformers are awesome!" )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n" )
@require_torch
def __A ( self : List[str] ) -> Union[str, Any]:
self.assertEqual(find_labels(__magic_name__ ) , ["labels"] )
self.assertEqual(find_labels(__magic_name__ ) , ["labels", "next_sentence_label"] )
self.assertEqual(find_labels(__magic_name__ ) , ["start_positions", "end_positions"] )
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
pass
self.assertEqual(find_labels(__magic_name__ ) , ["labels"] )
@require_tf
def __A ( self : List[str] ) -> Optional[Any]:
self.assertEqual(find_labels(__magic_name__ ) , ["labels"] )
self.assertEqual(find_labels(__magic_name__ ) , ["labels", "next_sentence_label"] )
self.assertEqual(find_labels(__magic_name__ ) , ["start_positions", "end_positions"] )
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
pass
self.assertEqual(find_labels(__magic_name__ ) , ["labels"] )
@require_flax
def __A ( self : int ) -> Tuple:
# Flax models don't have labels
self.assertEqual(find_labels(__magic_name__ ) , [] )
self.assertEqual(find_labels(__magic_name__ ) , [] )
self.assertEqual(find_labels(__magic_name__ ) , [] )
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
pass
self.assertEqual(find_labels(__magic_name__ ) , [] )
| 118 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
lowerCAmelCase__ = {'''configuration_speech_encoder_decoder''': ['''SpeechEncoderDecoderConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''SpeechEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''FlaxSpeechEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 121 |
from math import ceil, sqrt
def __lowerCamelCase ( lowerCamelCase__ = 1_000_000 ):
"""simple docstring"""
lowercase__ : int = 0
for outer_width in range(3 , (limit // 4) + 2 ):
if outer_width**2 > limit:
lowercase__ : List[str] = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
else:
lowercase__ : List[str] = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
if __name__ == "__main__":
print(f'''{solution() = }''')
| 121 | 1 |
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
lowerCamelCase_ = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
lowerCamelCase_ = {
'''abeja/gpt-neox-japanese-2.7b''': 20_48,
}
def __lowercase ( __lowercase , __lowercase ) -> str:
'''simple docstring'''
with open(__lowercase , "r" , encoding="utf-8" ) as f:
_A = json.loads(f.read() )
_A = collections.OrderedDict()
_A = collections.OrderedDict()
_A = collections.OrderedDict()
with open(__lowercase , "r" , encoding="utf-8" ) as f:
_A = f.readlines()
_A = [[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token]
for idx, b in enumerate(__lowercase ):
_A = b
_A = idx
for wd in b:
_A = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class _UpperCAmelCase ( snake_case_ ):
"""simple docstring"""
snake_case = VOCAB_FILES_NAMES
snake_case = PRETRAINED_VOCAB_FILES_MAP
snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case = ['''input_ids''', '''attention_mask''']
def __init__( self : Tuple , __UpperCAmelCase : int , __UpperCAmelCase : Tuple , __UpperCAmelCase : List[str]="<|endoftext|>" , __UpperCAmelCase : Optional[Any]="<|endoftext|>" , __UpperCAmelCase : Optional[Any]="<|startoftext|>" , __UpperCAmelCase : Optional[Any]="<|endoftext|>" , __UpperCAmelCase : Dict=False , **__UpperCAmelCase : Tuple , ):
'''simple docstring'''
super().__init__(
unk_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , do_clean_text=__UpperCAmelCase , **__UpperCAmelCase , )
if not os.path.isfile(__UpperCAmelCase ):
raise ValueError(
f'''Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained'''
" model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
if not os.path.isfile(__UpperCAmelCase ):
raise ValueError(
f'''Can\'t find a emoji file at path \'{emoji_file}\'. To load the emoji information from a Google'''
" pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
_A = do_clean_text
_A , _A , _A , _A = load_vocab_and_emoji(__UpperCAmelCase , __UpperCAmelCase )
_A = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def lowerCAmelCase ( self : int ):
'''simple docstring'''
return len(self.raw_vocab )
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
return dict(self.raw_vocab , **self.added_tokens_encoder )
def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : str ):
'''simple docstring'''
return self.subword_tokenizer.tokenize(__UpperCAmelCase , clean=self.do_clean_text )
def lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : Dict ):
'''simple docstring'''
return self.vocab.get(__UpperCAmelCase , self.vocab.get(self.unk_token ) )
def lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : Optional[Any] ):
'''simple docstring'''
return self.subword_tokenizer.convert_id_to_token(__UpperCAmelCase )
def lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : Tuple ):
'''simple docstring'''
_A = "".join(__UpperCAmelCase ).strip()
return out_string
def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : "Conversation" ):
'''simple docstring'''
_A = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) + [self.eos_token_id] )
if len(__UpperCAmelCase ) > self.model_max_length:
_A = input_ids[-self.model_max_length :]
return input_ids
def lowerCAmelCase ( self : Any , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] = None ):
'''simple docstring'''
_A = 0
if os.path.isdir(__UpperCAmelCase ):
_A = os.path.join(
__UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
_A = os.path.join(
__UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"] )
else:
_A = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
)
_A = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
)
with open(__UpperCAmelCase , "w" , encoding="utf-8" ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
" Please check that the vocabulary is not corrupted!" )
_A = token_index
writer.write(",".join(__UpperCAmelCase ) + "\n" )
index += 1
with open(__UpperCAmelCase , "w" , encoding="utf-8" ) as writer:
json.dump(self.emoji , __UpperCAmelCase )
return vocab_file, emoji_file
class _UpperCAmelCase ( snake_case_ ):
"""simple docstring"""
def __init__( self : Optional[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict ):
'''simple docstring'''
_A = vocab # same as swe
_A = ids_to_tokens # same as bpe
_A = emoji
_A = np.max([len(__UpperCAmelCase ) for w in self.vocab.keys()] )
_A = re.compile(R"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)" )
_A = re.compile(R"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*" )
_A = re.compile(R"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}" )
_A = re.compile(
R"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
_A = re.compile(
R"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
_A = re.compile(
R"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*" )
_A = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
_A = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
_A = str.maketrans({k: "<BLOCK>" for k in keisen + blocks} )
def __len__( self : Optional[int] ):
'''simple docstring'''
return len(self.ids_to_tokens )
def lowerCAmelCase ( self : Any , __UpperCAmelCase : List[Any] ):
'''simple docstring'''
_A = self.content_repattera.sub("<URL>" , __UpperCAmelCase )
_A = self.content_repattera.sub("<EMAIL>" , __UpperCAmelCase )
_A = self.content_repattera.sub("<TEL>" , __UpperCAmelCase )
_A = self.content_repattera.sub("<DATE>" , __UpperCAmelCase )
_A = self.content_repattera.sub("<DATE>" , __UpperCAmelCase )
_A = self.content_repattera.sub("<PRICE>" , __UpperCAmelCase )
_A = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
_A = content.replace("<BLOCK><BLOCK>" , "<BLOCK>" )
return content
def lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[Any]=False ):
'''simple docstring'''
_A = text.replace(" " , "<SP>" )
_A = text.replace(" " , "<SP>" )
_A = text.replace("\r\n" , "<BR>" )
_A = text.replace("\n" , "<BR>" )
_A = text.replace("\r" , "<BR>" )
_A = text.replace("\t" , "<TAB>" )
_A = text.replace("—" , "ー" )
_A = text.replace("−" , "ー" )
for k, v in self.emoji["emoji"].items():
if k in text:
_A = text.replace(__UpperCAmelCase , __UpperCAmelCase )
if clean:
_A = self.clean_text(__UpperCAmelCase )
def check_simbol(__UpperCAmelCase : Union[str, Any] ):
_A = x.encode()
if len(__UpperCAmelCase ) == 1 and len(__UpperCAmelCase ) == 2:
_A = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0xC2A1 and c <= 0xC2BF)
or (c >= 0xC780 and c <= 0xC783)
or (c >= 0xCAB9 and c <= 0xCBBF)
or (c >= 0xCC80 and c <= 0xCDA2)
):
return True
return False
def checkuae(__UpperCAmelCase : Any ):
_A = x.encode()
if len(__UpperCAmelCase ) == 1 and len(__UpperCAmelCase ) == 3:
_A = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0xE2_8080 and c <= 0xE2_B07F:
return True
return False
_A = 0
_A = []
while pos < len(__UpperCAmelCase ):
_A = min(len(__UpperCAmelCase ) , pos + self.maxlen + 1 ) if text[pos] == "<" else pos + 3
_A = [] # (token_id, token, pos)
for e in range(__UpperCAmelCase , __UpperCAmelCase , -1 ):
_A = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(__UpperCAmelCase ) > 2:
_A = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(__UpperCAmelCase ) > 0:
# the smallest token_id is adopted
_A , _A , _A = sorted(__UpperCAmelCase , key=lambda __UpperCAmelCase : x[0] )[0]
result.append(__UpperCAmelCase )
_A = e
else:
_A = pos + 1
_A = text[pos:end]
if check_simbol(__UpperCAmelCase ):
result.append("<KIGOU>" )
elif checkuae(__UpperCAmelCase ):
result.append("<U2000U2BFF>" )
else:
for i in wd.encode("utf-8" ):
result.append("<|byte%d|>" % i )
_A = end
return result
def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : List[str]="\n" ):
'''simple docstring'''
_A = []
_A = []
_A = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(__UpperCAmelCase ) > 0:
words.append(bytearray(__UpperCAmelCase ).decode("utf-8" , errors="replace" ) )
_A = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["emoji_inv"][word] )
elif word == "<SP>":
words.append(" " )
elif word == "<BR>":
words.append(__UpperCAmelCase )
elif word == "<TAB>":
words.append("\t" )
elif word == "<BLOCK>":
words.append("▀" )
elif word == "<KIGOU>":
words.append("ǀ" )
elif word == "<U2000U2BFF>":
words.append("‖" )
else:
words.append(__UpperCAmelCase )
if len(__UpperCAmelCase ) > 0:
words.append(bytearray(__UpperCAmelCase ).decode("utf-8" , errors="replace" ) )
_A = "".join(__UpperCAmelCase )
return text
| 79 |
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ ) -> str:
'''simple docstring'''
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
raise ValueError('iterations must be defined as integers' )
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or not number >= 1:
raise ValueError(
'starting number must be\n and integer and be more than 0' )
if not iterations >= 1:
raise ValueError('Iterations must be done more than 0 times to play FizzBuzz' )
SCREAMING_SNAKE_CASE__ = ''
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(UpperCamelCase_ )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
| 176 | 0 |
def a__ ( UpperCAmelCase : int ) -> int:
assert isinstance(UpperCAmelCase , UpperCAmelCase ), f'''The input value of [n={number}] is not an integer'''
if number == 1:
return 2
elif number < 1:
UpperCAmelCase : Optional[int] = f'''The input value of [n={number}] has to be > 0'''
raise ValueError(UpperCAmelCase )
else:
UpperCAmelCase : Dict = sylvester(number - 1 )
UpperCAmelCase : Union[str, Any] = num - 1
UpperCAmelCase : Union[str, Any] = num
return lower * upper + 1
if __name__ == "__main__":
print(f"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
| 99 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
_lowerCamelCase : Optional[Any] = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def a__ ( ) -> List[Any]:
UpperCAmelCase : Dict = _ask_options(
'''In which compute environment are you running?''' , ['''This machine''', '''AWS (Amazon SageMaker)'''] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
UpperCAmelCase : List[Any] = get_sagemaker_input()
else:
UpperCAmelCase : Optional[Any] = get_cluster_input()
return config
def a__ ( UpperCAmelCase : Union[str, Any]=None ) -> List[Any]:
if subparsers is not None:
UpperCAmelCase : Optional[Any] = subparsers.add_parser('''config''' , description=UpperCAmelCase )
else:
UpperCAmelCase : List[str] = argparse.ArgumentParser('''Accelerate config command''' , description=UpperCAmelCase )
parser.add_argument(
'''--config_file''' , default=UpperCAmelCase , help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) , )
if subparsers is not None:
parser.set_defaults(func=UpperCAmelCase )
return parser
def a__ ( UpperCAmelCase : List[Any] ) -> Optional[int]:
UpperCAmelCase : str = get_user_input()
if args.config_file is not None:
UpperCAmelCase : Any = args.config_file
else:
if not os.path.isdir(UpperCAmelCase ):
os.makedirs(UpperCAmelCase )
UpperCAmelCase : List[str] = default_yaml_config_file
if config_file.endswith('''.json''' ):
config.to_json_file(UpperCAmelCase )
else:
config.to_yaml_file(UpperCAmelCase )
print(f'''accelerate configuration saved at {config_file}''' )
def a__ ( ) -> Dict:
UpperCAmelCase : str = config_command_parser()
UpperCAmelCase : str = parser.parse_args()
config_command(UpperCAmelCase )
if __name__ == "__main__":
main()
| 99 | 1 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = jnp.ones((batch_size, length) ) / length
return scores
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = None
__lowerCamelCase = 20
__lowerCamelCase = self._get_uniform_logits(batch_size=2 , length=UpperCamelCase__ )
# tweak scores to not be uniform anymore
__lowerCamelCase = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
__lowerCamelCase = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
__lowerCamelCase = jax.nn.softmax(UpperCamelCase__ , axis=-1 )
__lowerCamelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
__lowerCamelCase = FlaxTemperatureLogitsWarper(temperature=1.3 )
__lowerCamelCase = jax.nn.softmax(temp_dist_warper_sharper(UpperCamelCase__ , scores.copy() , cur_len=UpperCamelCase__ ) , axis=-1 )
__lowerCamelCase = jax.nn.softmax(temp_dist_warper_smoother(UpperCamelCase__ , scores.copy() , cur_len=UpperCamelCase__ ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = None
__lowerCamelCase = 10
__lowerCamelCase = 2
# create ramp distribution
__lowerCamelCase = np.broadcast_to(np.arange(UpperCamelCase__ )[None, :] , (batch_size, vocab_size) ).copy()
__lowerCamelCase = ramp_logits[1:, : vocab_size // 2] + vocab_size
__lowerCamelCase = FlaxTopKLogitsWarper(3 )
__lowerCamelCase = top_k_warp(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
__lowerCamelCase = 5
__lowerCamelCase = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
__lowerCamelCase = np.broadcast_to(np.arange(UpperCamelCase__ )[None, :] , (batch_size, length) ).copy()
__lowerCamelCase = top_k_warp_safety_check(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = None
__lowerCamelCase = 10
__lowerCamelCase = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
__lowerCamelCase = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
__lowerCamelCase = FlaxTopPLogitsWarper(0.8 )
__lowerCamelCase = np.exp(top_p_warp(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
__lowerCamelCase = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) )
# check edge cases with negative and extreme logits
__lowerCamelCase = np.broadcast_to(np.arange(UpperCamelCase__ )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
__lowerCamelCase = ramp_logits[1] * 1_00.0
# make sure at least 2 tokens are kept
__lowerCamelCase = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
__lowerCamelCase = top_p_warp(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = 20
__lowerCamelCase = 4
__lowerCamelCase = 0
__lowerCamelCase = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCamelCase__ )
# check that min length is applied at length 5
__lowerCamelCase = ids_tensor((batch_size, 20) , vocab_size=20 )
__lowerCamelCase = 5
__lowerCamelCase = self._get_uniform_logits(UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = min_dist_processor(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('inf' )] )
# check that min length is not applied anymore at length 15
__lowerCamelCase = self._get_uniform_logits(UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = 15
__lowerCamelCase = min_dist_processor(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
self.assertFalse(jnp.isinf(UpperCamelCase__ ).any() )
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = 20
__lowerCamelCase = 4
__lowerCamelCase = 0
__lowerCamelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCamelCase__ )
# check that all scores are -inf except the bos_token_id score
__lowerCamelCase = ids_tensor((batch_size, 1) , vocab_size=20 )
__lowerCamelCase = 1
__lowerCamelCase = self._get_uniform_logits(UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = logits_processor(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
__lowerCamelCase = 3
__lowerCamelCase = self._get_uniform_logits(UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = logits_processor(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
self.assertFalse(jnp.isinf(UpperCamelCase__ ).any() )
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = 20
__lowerCamelCase = 4
__lowerCamelCase = 0
__lowerCamelCase = 5
__lowerCamelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCamelCase__ , eos_token_id=UpperCamelCase__ )
# check that all scores are -inf except the eos_token_id when max_length is reached
__lowerCamelCase = ids_tensor((batch_size, 4) , vocab_size=20 )
__lowerCamelCase = 4
__lowerCamelCase = self._get_uniform_logits(UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = logits_processor(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
__lowerCamelCase = 3
__lowerCamelCase = self._get_uniform_logits(UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = logits_processor(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
self.assertFalse(jnp.isinf(UpperCamelCase__ ).any() )
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = 4
__lowerCamelCase = 10
__lowerCamelCase = 15
__lowerCamelCase = 2
__lowerCamelCase = 1
__lowerCamelCase = 15
# dummy input_ids and scores
__lowerCamelCase = ids_tensor((batch_size, sequence_length) , UpperCamelCase__ )
__lowerCamelCase = input_ids.copy()
__lowerCamelCase = self._get_uniform_logits(UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = scores.copy()
# instantiate all dist processors
__lowerCamelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
__lowerCamelCase = FlaxTopKLogitsWarper(3 )
__lowerCamelCase = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
__lowerCamelCase = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCamelCase__ )
__lowerCamelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCamelCase__ )
__lowerCamelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCamelCase__ , eos_token_id=UpperCamelCase__ )
__lowerCamelCase = 10
# no processor list
__lowerCamelCase = temp_dist_warp(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
__lowerCamelCase = top_k_warp(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
__lowerCamelCase = top_p_warp(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
__lowerCamelCase = min_dist_proc(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
__lowerCamelCase = bos_dist_proc(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
__lowerCamelCase = eos_dist_proc(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
# with processor list
__lowerCamelCase = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
__lowerCamelCase = processor(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
# scores should be equal
self.assertTrue(jnp.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = 4
__lowerCamelCase = 10
__lowerCamelCase = 15
__lowerCamelCase = 2
__lowerCamelCase = 1
__lowerCamelCase = 15
# dummy input_ids and scores
__lowerCamelCase = ids_tensor((batch_size, sequence_length) , UpperCamelCase__ )
__lowerCamelCase = input_ids.copy()
__lowerCamelCase = self._get_uniform_logits(UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = scores.copy()
# instantiate all dist processors
__lowerCamelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
__lowerCamelCase = FlaxTopKLogitsWarper(3 )
__lowerCamelCase = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
__lowerCamelCase = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCamelCase__ )
__lowerCamelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCamelCase__ )
__lowerCamelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCamelCase__ , eos_token_id=UpperCamelCase__ )
__lowerCamelCase = 10
# no processor list
def run_no_processor_list(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
__lowerCamelCase = temp_dist_warp(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
__lowerCamelCase = top_k_warp(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
__lowerCamelCase = top_p_warp(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
__lowerCamelCase = min_dist_proc(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
__lowerCamelCase = bos_dist_proc(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
__lowerCamelCase = eos_dist_proc(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
return scores
# with processor list
def run_processor_list(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
__lowerCamelCase = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
__lowerCamelCase = processor(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
return scores
__lowerCamelCase = jax.jit(UpperCamelCase__ )
__lowerCamelCase = jax.jit(UpperCamelCase__ )
__lowerCamelCase = jitted_run_no_processor_list(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = jitted_run_processor_list(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# scores should be equal
self.assertTrue(jnp.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
| 90 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : str = logging.get_logger(__name__)
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=False ) -> Any:
lowerCamelCase : Any = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
lowerCamelCase : Union[str, Any] = [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=False ) -> str:
for i in range(config.num_hidden_layers ):
if base_model:
lowerCamelCase : Optional[int] = ""
else:
lowerCamelCase : List[str] = "deit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase : List[str] = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
lowerCamelCase : Optional[int] = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase : List[Any] = in_proj_weight[
: config.hidden_size, :
]
lowerCamelCase : Any = in_proj_bias[: config.hidden_size]
lowerCamelCase : List[str] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase : Optional[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase : List[str] = in_proj_weight[
-config.hidden_size :, :
]
lowerCamelCase : List[Any] = in_proj_bias[-config.hidden_size :]
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> str:
lowerCamelCase : List[str] = dct.pop(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Any = val
def A ( ) -> List[str]:
lowerCamelCase : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCamelCase : str = Image.open(requests.get(_SCREAMING_SNAKE_CASE ,stream=_SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
lowerCamelCase : Union[str, Any] = DeiTConfig()
# all deit models have fine-tuned heads
lowerCamelCase : Optional[int] = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
lowerCamelCase : Dict = 1000
lowerCamelCase : Tuple = "huggingface/label-files"
lowerCamelCase : List[str] = "imagenet-1k-id2label.json"
lowerCamelCase : List[Any] = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,repo_type="dataset" ) ,"r" ) )
lowerCamelCase : Optional[int] = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
lowerCamelCase : Tuple = idalabel
lowerCamelCase : str = {v: k for k, v in idalabel.items()}
lowerCamelCase : Dict = int(deit_name[-6:-4] )
lowerCamelCase : Optional[Any] = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith("tiny" ):
lowerCamelCase : Optional[Any] = 192
lowerCamelCase : List[str] = 768
lowerCamelCase : Tuple = 12
lowerCamelCase : Optional[Any] = 3
elif deit_name[9:].startswith("small" ):
lowerCamelCase : str = 384
lowerCamelCase : Optional[Any] = 1536
lowerCamelCase : Dict = 12
lowerCamelCase : Optional[int] = 6
if deit_name[9:].startswith("base" ):
pass
elif deit_name[4:].startswith("large" ):
lowerCamelCase : str = 1024
lowerCamelCase : List[str] = 4096
lowerCamelCase : Any = 24
lowerCamelCase : Dict = 16
# load original model from timm
lowerCamelCase : List[Any] = timm.create_model(_SCREAMING_SNAKE_CASE ,pretrained=_SCREAMING_SNAKE_CASE )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
lowerCamelCase : Dict = timm_model.state_dict()
lowerCamelCase : Dict = create_rename_keys(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
for src, dest in rename_keys:
rename_key(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
read_in_q_k_v(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# load HuggingFace model
lowerCamelCase : Optional[Any] = DeiTForImageClassificationWithTeacher(_SCREAMING_SNAKE_CASE ).eval()
model.load_state_dict(_SCREAMING_SNAKE_CASE )
# Check outputs on an image, prepared by DeiTImageProcessor
lowerCamelCase : Any = int(
(256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
lowerCamelCase : Union[str, Any] = DeiTImageProcessor(size=_SCREAMING_SNAKE_CASE ,crop_size=config.image_size )
lowerCamelCase : str = image_processor(images=prepare_img() ,return_tensors="pt" )
lowerCamelCase : int = encoding["pixel_values"]
lowerCamelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Union[str, Any] = timm_model(_SCREAMING_SNAKE_CASE )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_SCREAMING_SNAKE_CASE ,outputs.logits ,atol=1e-3 )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
print(f'''Saving model {deit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--deit_name',
default='vit_deit_base_distilled_patch16_224',
type=str,
help='Name of the DeiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
SCREAMING_SNAKE_CASE__ : List[str] = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 48 | 0 |
'''simple docstring'''
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _lowerCAmelCase ( yaml.SafeLoader ):
"""simple docstring"""
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> str:
A_ : List[str] = [self.constructed_objects[key_node] for key_node, _ in node.value]
A_ : List[str] = [tuple(_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else key for key in keys]
A_ : Tuple = Counter(_lowerCamelCase )
A_ : Any = [key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(F"Got duplicate yaml keys: {duplicate_keys}" )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase=False ) -> List[Any]:
A_ : List[Any] = super().construct_mapping(_lowerCamelCase , deep=_lowerCamelCase )
self._check_no_duplicates_on_constructed_node(_lowerCamelCase )
return mapping
def UpperCAmelCase ( a_ ) -> Tuple[Optional[str], str]:
"""simple docstring"""
A_ : Union[str, Any] = list(readme_content.splitlines() )
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
A_ : List[Any] = full_content[1:].index("""---""" ) + 1
A_ : List[Any] = """\n""".join(full_content[1:sep_idx] )
return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
return None, "\n".join(a_ )
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = {'''train_eval_index'''} # train-eval-index in the YAML metadata
@classmethod
def UpperCAmelCase_ ( cls , _lowerCamelCase ) -> "DatasetMetadata":
with open(_lowerCamelCase , encoding="""utf-8""" ) as readme_file:
A_ , A_ : Tuple = _split_yaml_from_readme(readme_file.read() )
if yaml_string is not None:
return cls.from_yaml_string(_lowerCamelCase )
else:
return cls()
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> List[Any]:
if path.exists():
with open(_lowerCamelCase , encoding="""utf-8""" ) as readme_file:
A_ : int = readme_file.read()
else:
A_ : str = None
A_ : Any = self._to_readme(_lowerCamelCase )
with open(_lowerCamelCase , """w""" , encoding="""utf-8""" ) as readme_file:
readme_file.write(_lowerCamelCase )
def UpperCAmelCase_ ( self , _lowerCamelCase = None ) -> str:
if readme_content is not None:
A_ , A_ : int = _split_yaml_from_readme(_lowerCamelCase )
A_ : int = """---\n""" + self.to_yaml_string() + """---\n""" + content
else:
A_ : Tuple = """---\n""" + self.to_yaml_string() + """---\n"""
return full_content
@classmethod
def UpperCAmelCase_ ( cls , _lowerCamelCase ) -> "DatasetMetadata":
A_ : List[Any] = yaml.load(_lowerCamelCase , Loader=_NoDuplicateSafeLoader ) or {}
# Convert the YAML keys to DatasetMetadata fields
A_ : Optional[int] = {
(key.replace("""-""" , """_""" ) if key.replace("""-""" , """_""" ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> str:
return yaml.safe_dump(
{
(key.replace("""_""" , """-""" ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
} , sort_keys=_lowerCamelCase , allow_unicode=_lowerCamelCase , encoding="""utf-8""" , ).decode("""utf-8""" )
UpperCamelCase__ : Optional[int] = {
'image-classification': [],
'translation': [],
'image-segmentation': [],
'fill-mask': [],
'automatic-speech-recognition': [],
'token-classification': [],
'sentence-similarity': [],
'audio-classification': [],
'question-answering': [],
'summarization': [],
'zero-shot-classification': [],
'table-to-text': [],
'feature-extraction': [],
'other': [],
'multiple-choice': [],
'text-classification': [],
'text-to-image': [],
'text2text-generation': [],
'zero-shot-image-classification': [],
'tabular-classification': [],
'tabular-regression': [],
'image-to-image': [],
'tabular-to-text': [],
'unconditional-image-generation': [],
'text-retrieval': [],
'text-to-speech': [],
'object-detection': [],
'audio-to-audio': [],
'text-generation': [],
'conversational': [],
'table-question-answering': [],
'visual-question-answering': [],
'image-to-text': [],
'reinforcement-learning': [],
'voice-activity-detection': [],
'time-series-forecasting': [],
'document-question-answering': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
UpperCamelCase__ : List[str] = ArgumentParser(usage='Validate the yaml metadata block of a README.md file.')
ap.add_argument('readme_filepath')
UpperCamelCase__ : List[str] = ap.parse_args()
UpperCamelCase__ : List[str] = Path(args.readme_filepath)
UpperCamelCase__ : Optional[int] = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 164 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase__ : List[Any] = logging.get_logger(__name__)
UpperCamelCase__ : int = {'vocab_file': 'spm_char.model'}
UpperCamelCase__ : Optional[Any] = {
'vocab_file': {
'microsoft/speecht5_asr': 'https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model',
'microsoft/speecht5_tts': 'https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model',
'microsoft/speecht5_vc': 'https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model',
}
}
UpperCamelCase__ : Union[str, Any] = {
'microsoft/speecht5_asr': 1_024,
'microsoft/speecht5_tts': 1_024,
'microsoft/speecht5_vc': 1_024,
}
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = VOCAB_FILES_NAMES
lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self , _lowerCamelCase , _lowerCamelCase="<s>" , _lowerCamelCase="</s>" , _lowerCamelCase="<unk>" , _lowerCamelCase="<pad>" , _lowerCamelCase = None , **_lowerCamelCase , ) -> None:
A_ : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , )
A_ : List[Any] = vocab_file
A_ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_lowerCamelCase )
@property
def UpperCAmelCase_ ( self ) -> Any:
return self.sp_model.get_piece_size()
def UpperCAmelCase_ ( self ) -> int:
A_ : Dict = {self.convert_ids_to_tokens(_lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> str:
A_ : Optional[int] = self.__dict__.copy()
A_ : str = None
return state
def __setstate__( self , _lowerCamelCase ) -> List[str]:
A_ : int = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
A_ : Union[str, Any] = {}
A_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> List[str]:
return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase )
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> List[str]:
return self.sp_model.piece_to_id(_lowerCamelCase )
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> List[str]:
A_ : Dict = self.sp_model.IdToPiece(_lowerCamelCase )
return token
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> Union[str, Any]:
A_ : Tuple = []
A_ : Union[str, Any] = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_lowerCamelCase ) + token
A_ : Optional[int] = []
else:
current_sub_tokens.append(_lowerCamelCase )
out_string += self.sp_model.decode(_lowerCamelCase )
return out_string.strip()
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase=None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase )
A_ : Union[str, Any] = [1]
if token_ids_a is None:
return ([0] * len(_lowerCamelCase )) + suffix_ones
return ([0] * len(_lowerCamelCase )) + ([0] * len(_lowerCamelCase )) + suffix_ones
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase = None ) -> Tuple[str]:
if not os.path.isdir(_lowerCamelCase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
A_ : Optional[int] = os.path.join(
_lowerCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowerCamelCase , """wb""" ) as fi:
A_ : List[str] = self.sp_model.serialized_model_proto()
fi.write(_lowerCamelCase )
return (out_vocab_file,)
| 164 | 1 |
"""simple docstring"""
import comet # From: unbabel-comet
import torch
import datasets
lowercase__ : Any = datasets.logging.get_logger(__name__)
lowercase__ : List[Any] = "\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel's Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = \"{COMET}: A Neural Framework for {MT} Evaluation\",\n author = \"Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon\",\n booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",\n pages = \"2685--2702\",\n}\n"
lowercase__ : Tuple = "\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n"
lowercase__ : int = "\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric('comet')\n >>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use\n >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]\n >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]\n >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [0.19, 0.92]\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class _UpperCAmelCase ( datasets.Metric):
def _snake_case ( self : str ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://unbabel.github.io/COMET/html/index.html''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''sources''': datasets.Value('''string''' , id='''sequence''' ),
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/Unbabel/COMET'''] , reference_urls=[
'''https://github.com/Unbabel/COMET''',
'''https://www.aclweb.org/anthology/2020.emnlp-main.213/''',
'''http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6''',
] , )
def _snake_case ( self : str , lowercase_ : List[str] ):
if self.config_name == "default":
snake_case_ : int = comet.load_from_checkpoint(comet.download_model('''wmt20-comet-da''' ) )
else:
snake_case_ : Dict = comet.load_from_checkpoint(comet.download_model(self.config_name ) )
def _snake_case ( self : str , lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : Any=None , lowercase_ : Optional[int]=False ):
if gpus is None:
snake_case_ : str = 1 if torch.cuda.is_available() else 0
snake_case_ : List[Any] = {'''src''': sources, '''mt''': predictions, '''ref''': references}
snake_case_ : Dict = [dict(zip(__snake_case , __snake_case ) ) for t in zip(*data.values() )]
snake_case_, snake_case_ : Tuple = self.scorer.predict(__snake_case , gpus=__snake_case , progress_bar=__snake_case )
return {"mean_score": mean_score, "scores": scores}
| 264 |
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def UpperCAmelCase__ (UpperCamelCase_ ):
"""simple docstring"""
snake_case = int(UpperCamelCase_ )
snake_case , snake_case , snake_case = t // 36_00, (t // 60) % 60, t % 60
return F'''{h}:{m:02d}:{s:02d}''' if h != 0 else F'''{m:02d}:{s:02d}'''
def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_=3_00 ):
"""simple docstring"""
return F'''
<div>
{prefix}
<progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>
{label}
</div>
'''
def UpperCAmelCase__ (UpperCamelCase_ ):
"""simple docstring"""
snake_case = '''<table border="1" class="dataframe">\n'''
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += F''' <th>{i}</th>\n'''
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
snake_case = F'''{elt:.6f}''' if isinstance(UpperCamelCase_ ,UpperCamelCase_ ) else str(UpperCamelCase_ )
html_code += F''' <td>{elt}</td>\n'''
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class A__ :
"""simple docstring"""
__magic_name__ = 5
__magic_name__ = 0.2
def __init__( self , __snake_case , __snake_case = None , __snake_case = True , __snake_case = None , __snake_case = 3_0_0 , ):
snake_case = total
snake_case = '''''' if prefix is None else prefix
snake_case = leave
snake_case = parent
snake_case = width
snake_case = None
snake_case = None
snake_case = None
def a_ ( self , __snake_case , __snake_case = False , __snake_case = None ):
snake_case = value
if comment is not None:
snake_case = comment
if self.last_value is None:
snake_case = snake_case = time.time()
snake_case = snake_case = value
snake_case = snake_case = None
snake_case = self.warmup
snake_case = 1
self.update_bar(__snake_case )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
if self.first_calls > 0:
self.first_calls -= 1
snake_case = time.time()
snake_case = current_time - self.start_time
# We could have value = self.start_value if the update is called twixe with the same start value.
if value > self.start_value:
snake_case = self.elapsed_time / (value - self.start_value)
else:
snake_case = None
if value >= self.total:
snake_case = self.total
snake_case = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
snake_case = self.average_time_per_item * (self.total - value)
self.update_bar(__snake_case )
snake_case = value
snake_case = current_time
if self.average_time_per_item is None:
snake_case = 1
else:
snake_case = max(int(self.update_every / self.average_time_per_item ) , 1 )
def a_ ( self , __snake_case , __snake_case=None ):
snake_case = ''' ''' * (len(str(self.total ) ) - len(str(__snake_case ) )) + str(__snake_case )
if self.elapsed_time is None:
snake_case = F'''[{spaced_value}/{self.total} : < :'''
elif self.predicted_remaining is None:
snake_case = F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )}'''
else:
snake_case = (
F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <'''
F''' {format_time(self.predicted_remaining )}'''
)
self.label += F''', {1/self.average_time_per_item:.2f} it/s'''
self.label += "]" if self.comment is None or len(self.comment ) == 0 else F''', {self.comment}]'''
self.display()
def a_ ( self ):
snake_case = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
snake_case = disp.display(disp.HTML(self.html_code ) , display_id=__snake_case )
else:
self.output.update(disp.HTML(self.html_code ) )
def a_ ( self ):
if self.parent is None and self.output is not None:
self.output.update(disp.HTML('''''' ) )
class A__ ( snake_case__ ):
"""simple docstring"""
def __init__( self , __snake_case , __snake_case=None ):
super().__init__(__snake_case )
snake_case = None if column_names is None else [column_names]
snake_case = None
def a_ ( self ):
snake_case = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
snake_case = disp.display(disp.HTML(self.html_code ) , display_id=__snake_case )
else:
self.output.update(disp.HTML(self.html_code ) )
def a_ ( self , __snake_case ):
if self.inner_table is None:
snake_case = [list(values.keys() ), list(values.values() )]
else:
snake_case = self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(__snake_case )
snake_case = columns
self.inner_table.append([values[c] for c in columns] )
def a_ ( self , __snake_case , __snake_case=None , __snake_case=3_0_0 ):
snake_case = NotebookProgressBar(__snake_case , prefix=__snake_case , parent=self , width=__snake_case )
return self.child_bar
def a_ ( self ):
snake_case = None
self.display()
class A__ ( snake_case__ ):
"""simple docstring"""
def __init__( self ):
snake_case = None
snake_case = None
snake_case = False
def a_ ( self , __snake_case , __snake_case , __snake_case , **__snake_case ):
snake_case = '''Epoch''' if args.evaluation_strategy == IntervalStrategy.EPOCH else '''Step'''
snake_case = 0
snake_case = 0
snake_case = [self.first_column] + ['''Training Loss''']
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append('''Validation Loss''' )
snake_case = NotebookTrainingTracker(state.max_steps , __snake_case )
def a_ ( self , __snake_case , __snake_case , __snake_case , **__snake_case ):
snake_case = int(state.epoch ) if int(state.epoch ) == state.epoch else F'''{state.epoch:.2f}'''
self.training_tracker.update(
state.global_step + 1 , comment=F'''Epoch {epoch}/{state.num_train_epochs}''' , force_update=self._force_next_update , )
snake_case = False
def a_ ( self , __snake_case , __snake_case , __snake_case , __snake_case=None , **__snake_case ):
if not has_length(__snake_case ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
snake_case = self.training_tracker.add_child(len(__snake_case ) )
else:
snake_case = NotebookProgressBar(len(__snake_case ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def a_ ( self , __snake_case , __snake_case , __snake_case , **__snake_case ):
if self.prediction_bar is not None:
self.prediction_bar.close()
snake_case = None
def a_ ( self , __snake_case , __snake_case , __snake_case , __snake_case=None , **__snake_case ):
# Only for when there is no evaluation
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
snake_case = {'''Training Loss''': logs['''loss''']}
# First column is necessarily Step sine we're not in epoch eval strategy
snake_case = state.global_step
self.training_tracker.write_line(__snake_case )
def a_ ( self , __snake_case , __snake_case , __snake_case , __snake_case=None , **__snake_case ):
if self.training_tracker is not None:
snake_case = {'''Training Loss''': '''No log''', '''Validation Loss''': '''No log'''}
for log in reversed(state.log_history ):
if "loss" in log:
snake_case = log['''loss''']
break
if self.first_column == "Epoch":
snake_case = int(state.epoch )
else:
snake_case = state.global_step
snake_case = '''eval'''
for k in metrics:
if k.endswith('''_loss''' ):
snake_case = re.sub(R'''\_loss$''' , '''''' , __snake_case )
snake_case = metrics.pop('''total_flos''' , __snake_case )
snake_case = metrics.pop('''epoch''' , __snake_case )
snake_case = metrics.pop(F'''{metric_key_prefix}_runtime''' , __snake_case )
snake_case = metrics.pop(F'''{metric_key_prefix}_samples_per_second''' , __snake_case )
snake_case = metrics.pop(F'''{metric_key_prefix}_steps_per_second''' , __snake_case )
snake_case = metrics.pop(F'''{metric_key_prefix}_jit_compilation_time''' , __snake_case )
for k, v in metrics.items():
if k == F'''{metric_key_prefix}_loss''':
snake_case = v
else:
snake_case = k.split('''_''' )
snake_case = ''' '''.join([part.capitalize() for part in splits[1:]] )
snake_case = v
self.training_tracker.write_line(__snake_case )
self.training_tracker.remove_child()
snake_case = None
# Evaluation takes a long time so we should force the next update.
snake_case = True
def a_ ( self , __snake_case , __snake_case , __snake_case , **__snake_case ):
self.training_tracker.update(
state.global_step , comment=F'''Epoch {int(state.epoch )}/{state.num_train_epochs}''' , force_update=__snake_case )
snake_case = None
| 127 | 0 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
UpperCAmelCase__ : Any = pytest.mark.integration
@pytest.mark.parametrize('''path''' , ['''paws''', '''csv'''] )
def lowerCamelCase__ ( a , a ) -> Any:
inspect_dataset(a , a )
_A: Any = path + '''.py'''
assert script_name in os.listdir(a )
assert "__pycache__" not in os.listdir(a )
@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''' , ['''accuracy'''] )
def lowerCamelCase__ ( a , a ) -> Tuple:
inspect_metric(a , a )
_A: str = path + '''.py'''
assert script_name in os.listdir(a )
assert "__pycache__" not in os.listdir(a )
@pytest.mark.parametrize(
'''path, config_name, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def lowerCamelCase__ ( a , a , a ) -> List[Any]:
_A: int = get_dataset_config_info(a , config_name=a )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def lowerCamelCase__ ( a , a , a ) -> Union[str, Any]:
with pytest.raises(a ):
get_dataset_config_info(a , config_name=a )
@pytest.mark.parametrize(
'''path, expected''' , [
('''squad''', '''plain_text'''),
('''acronym_identification''', '''default'''),
('''lhoestq/squad''', '''plain_text'''),
('''lhoestq/test''', '''default'''),
('''lhoestq/demo1''', '''lhoestq--demo1'''),
('''dalle-mini/wit''', '''dalle-mini--wit'''),
] , )
def lowerCamelCase__ ( a , a ) -> Optional[Any]:
_A: Optional[Any] = get_dataset_config_names(a )
assert expected in config_names
@pytest.mark.parametrize(
'''path, expected_configs, expected_splits_in_first_config''' , [
('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
] , )
def lowerCamelCase__ ( a , a , a ) -> Tuple:
_A: List[Any] = get_dataset_infos(a )
assert list(infos.keys() ) == expected_configs
_A: Union[str, Any] = expected_configs[0]
assert expected_config in infos
_A: Any = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'''path, expected_config, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def lowerCamelCase__ ( a , a , a ) -> List[Any]:
_A: Any = get_dataset_infos(a )
assert expected_config in infos
_A: str = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def lowerCamelCase__ ( a , a , a ) -> Optional[Any]:
with pytest.raises(a ):
get_dataset_split_names(a , config_name=a )
| 359 |
import os
from pathlib import Path
def lowerCamelCase__ ( ) -> Optional[Any]:
from torch.utils.cpp_extension import load
_A: str = Path(a ).resolve().parent.parent.parent / '''kernels''' / '''deformable_detr'''
_A: Tuple = [
root / filename
for filename in [
'''vision.cpp''',
os.path.join('''cpu''' , '''ms_deform_attn_cpu.cpp''' ),
os.path.join('''cuda''' , '''ms_deform_attn_cuda.cu''' ),
]
]
load(
'''MultiScaleDeformableAttention''' , a , with_cuda=a , extra_include_paths=[str(a )] , extra_cflags=['''-DWITH_CUDA=1'''] , extra_cuda_cflags=[
'''-DCUDA_HAS_FP16=1''',
'''-D__CUDA_NO_HALF_OPERATORS__''',
'''-D__CUDA_NO_HALF_CONVERSIONS__''',
'''-D__CUDA_NO_HALF2_OPERATORS__''',
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
| 301 | 0 |
import math
def _SCREAMING_SNAKE_CASE ( ) -> None:
'''simple docstring'''
__UpperCamelCase : List[Any] = input("Enter message: ")
__UpperCamelCase : Optional[int] = int(input(F'Enter key [2-{len(_lowerCamelCase) - 1}]: '))
__UpperCamelCase : str = input("Encryption/Decryption [e/d]: ")
if mode.lower().startswith("e"):
__UpperCamelCase : List[str] = encrypt_message(_lowerCamelCase , _lowerCamelCase)
elif mode.lower().startswith("d"):
__UpperCamelCase : Dict = decrypt_message(_lowerCamelCase , _lowerCamelCase)
# Append pipe symbol (vertical bar) to identify spaces at the end.
print(F'Output:\n{text + "|"}')
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : int , _lowerCamelCase : str) -> str:
'''simple docstring'''
__UpperCamelCase : Optional[Any] = [""] * key
for col in range(_lowerCamelCase):
__UpperCamelCase : Any = col
while pointer < len(_lowerCamelCase):
cipher_text[col] += message[pointer]
pointer += key
return "".join(_lowerCamelCase)
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : int , _lowerCamelCase : str) -> str:
'''simple docstring'''
__UpperCamelCase : Any = math.ceil(len(_lowerCamelCase) / key)
__UpperCamelCase : Any = key
__UpperCamelCase : str = (num_cols * num_rows) - len(_lowerCamelCase)
__UpperCamelCase : Union[str, Any] = [""] * num_cols
__UpperCamelCase : Dict = 0
__UpperCamelCase : int = 0
for symbol in message:
plain_text[col] += symbol
col += 1
if (
(col == num_cols)
or (col == num_cols - 1)
and (row >= num_rows - num_shaded_boxes)
):
__UpperCamelCase : List[Any] = 0
row += 1
return "".join(_lowerCamelCase)
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 232 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False')) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env')
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'pytorch',
'script': 'run_ddp.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'tensorflow',
'script': 'run_tf_dist.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
},
])
class lowerCamelCase__ ( unittest.TestCase):
'''simple docstring'''
def _lowerCamelCase ( self :List[Any] ) -> Any:
if self.framework == "pytorch":
subprocess.run(
f'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split() , encoding="utf-8" , check=a , )
assert hasattr(self , "env" )
def _lowerCamelCase ( self :Any , a :Optional[Any] ) -> Dict:
__UpperCamelCase : str = f'{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}'
# distributed data settings
__UpperCamelCase : Optional[int] = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=a , instance_count=a , instance_type=self.instance_type , debugger_hook_config=a , hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=a , py_version="py36" , )
def _lowerCamelCase ( self :Dict , a :Dict ) -> Optional[int]:
TrainingJobAnalytics(a ).export_csv(f'{self.env.test_path}/{job_name}_metrics.csv' )
@parameterized.expand([(2,)] )
def _lowerCamelCase ( self :Dict , a :Tuple ) -> List[Any]:
# create estimator
__UpperCamelCase : int = self.create_estimator(a )
# run training
estimator.fit()
# result dataframe
__UpperCamelCase : Optional[int] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
__UpperCamelCase : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
__UpperCamelCase : Tuple = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
__UpperCamelCase : int = (
Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 9_9_9_9_9_9 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f'{estimator.latest_training_job.name}.json' , "w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , a ) | 232 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class __snake_case :
def __init__( self : Optional[Any] , _lowercase : List[str] , _lowercase : List[Any]=13 , _lowercase : Optional[int]=7 , _lowercase : Dict=True , _lowercase : Tuple=True , _lowercase : List[str]=True , _lowercase : Optional[int]=True , _lowercase : Tuple=99 , _lowercase : List[Any]=32 , _lowercase : List[str]=2 , _lowercase : str=4 , _lowercase : List[str]=37 , _lowercase : List[str]="gelu" , _lowercase : Dict=0.1 , _lowercase : Dict=0.1 , _lowercase : Optional[int]=5_12 , _lowercase : Tuple=16 , _lowercase : Any=2 , _lowercase : Dict=0.02 , _lowercase : Dict=3 , _lowercase : Optional[int]=4 , _lowercase : List[Any]=None , _lowercase : Tuple=0 , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = batch_size
SCREAMING_SNAKE_CASE__ = seq_length
SCREAMING_SNAKE_CASE__ = is_training
SCREAMING_SNAKE_CASE__ = use_input_mask
SCREAMING_SNAKE_CASE__ = use_token_type_ids
SCREAMING_SNAKE_CASE__ = use_labels
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = type_vocab_size
SCREAMING_SNAKE_CASE__ = type_sequence_label_size
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = num_labels
SCREAMING_SNAKE_CASE__ = num_choices
SCREAMING_SNAKE_CASE__ = scope
SCREAMING_SNAKE_CASE__ = projection_dim
def __a ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
SCREAMING_SNAKE_CASE__ = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__ = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE__ = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , )
SCREAMING_SNAKE_CASE__ = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __a ( self : Tuple , _lowercase : Union[str, Any] , _lowercase : Optional[Any] , _lowercase : str , _lowercase : List[Any] , _lowercase : Optional[int] , _lowercase : List[str] , _lowercase : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = TFDPRContextEncoder(config=_lowercase )
SCREAMING_SNAKE_CASE__ = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase )
SCREAMING_SNAKE_CASE__ = model(_lowercase , token_type_ids=_lowercase )
SCREAMING_SNAKE_CASE__ = model(_lowercase )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def __a ( self : Dict , _lowercase : Dict , _lowercase : Any , _lowercase : List[str] , _lowercase : str , _lowercase : Tuple , _lowercase : List[Any] , _lowercase : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = TFDPRQuestionEncoder(config=_lowercase )
SCREAMING_SNAKE_CASE__ = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase )
SCREAMING_SNAKE_CASE__ = model(_lowercase , token_type_ids=_lowercase )
SCREAMING_SNAKE_CASE__ = model(_lowercase )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def __a ( self : Any , _lowercase : Optional[int] , _lowercase : Any , _lowercase : Dict , _lowercase : str , _lowercase : Optional[Any] , _lowercase : Any , _lowercase : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = TFDPRReader(config=_lowercase )
SCREAMING_SNAKE_CASE__ = model(_lowercase , attention_mask=_lowercase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
def __a ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) ,
) = config_and_inputs
SCREAMING_SNAKE_CASE__ = {"""input_ids""": input_ids}
return config, inputs_dict
@require_tf
class __snake_case ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
lowerCAmelCase_ = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
lowerCAmelCase_ = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def __a ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = TFDPRModelTester(self )
SCREAMING_SNAKE_CASE__ = ConfigTester(self , config_class=_lowercase , hidden_size=37 )
def __a ( self : Tuple ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __a ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*_lowercase )
def __a ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*_lowercase )
def __a ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*_lowercase )
@slow
def __a ( self : List[Any] ):
"""simple docstring"""
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ = TFDPRContextEncoder.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ = TFDPRContextEncoder.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ = TFDPRQuestionEncoder.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ = TFDPRReader.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
@require_tf
class __snake_case ( unittest.TestCase ):
@slow
def __a ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = TFDPRQuestionEncoder.from_pretrained("""facebook/dpr-question_encoder-single-nq-base""" )
SCREAMING_SNAKE_CASE__ = tf.constant(
[[1_01, 75_92, 10_10, 20_03, 20_26, 38_99, 1_01_40, 10_29, 1_02]] ) # [CLS] hello, is my dog cute? [SEP]
SCREAMING_SNAKE_CASE__ = model(_lowercase )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
SCREAMING_SNAKE_CASE__ = tf.constant(
[
[
0.03_23_62_53,
0.12_75_33_35,
0.16_81_85_09,
0.00_27_97_86,
0.3_89_69_33,
0.24_26_49_45,
0.2_17_89_71,
-0.02_33_52_27,
-0.08_48_19_59,
-0.14_32_41_17,
]
] )
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 204 | import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class __snake_case ( unittest.TestCase ):
def __a ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
SCREAMING_SNAKE_CASE__ = Vector()
def __a ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(_lowercase ) , """(0,0,0,0,0,1)""" )
def __a ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = Vector([1, 2, 3, 4] )
self.assertEqual(len(_lowercase ) , 4 )
def __a ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = Vector([1, 2] )
SCREAMING_SNAKE_CASE__ = Vector([1, 2, 3, 4, 5] )
SCREAMING_SNAKE_CASE__ = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
SCREAMING_SNAKE_CASE__ = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.2_36 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.4_16 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.6_16 , 3 )
def __a ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = Vector([1, 2, 3] )
SCREAMING_SNAKE_CASE__ = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def __a ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = Vector([1, 2, 3] )
SCREAMING_SNAKE_CASE__ = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def __a ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = Vector([1, 2, 3] )
SCREAMING_SNAKE_CASE__ = Vector([2, -1, 4] ) # for test of dot product
SCREAMING_SNAKE_CASE__ = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , """(3.0,6.0,9.0)""" )
self.assertEqual((a * b) , 0 )
def __a ( self : Union[str, Any] ):
"""simple docstring"""
self.assertEqual(str(zero_vector(10 ) ).count("""0""" ) , 10 )
def __a ( self : str ):
"""simple docstring"""
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , """(0,1,0)""" )
def __a ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = Vector([1, 2, 3] )
SCREAMING_SNAKE_CASE__ = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , _lowercase , _lowercase ) ) , """(3,4,7)""" )
def __a ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = Vector([1, 0, 0, 0, 0, 0] )
SCREAMING_SNAKE_CASE__ = x.copy()
self.assertEqual(str(_lowercase ) , str(_lowercase ) )
def __a ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(_lowercase ) , """(0,1,0)""" )
def __a ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual("""|1,2,3|\n|2,4,5|\n|6,7,8|\n""" , str(_lowercase ) )
def __a ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
SCREAMING_SNAKE_CASE__ = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(_lowercase , _lowercase ) )
def __a ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
SCREAMING_SNAKE_CASE__ = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(_lowercase , _lowercase ) )
def __a ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def __a ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
SCREAMING_SNAKE_CASE__ = Vector([1, 2, 3] )
self.assertEqual("""(14,32,50)""" , str(a * x ) )
self.assertEqual("""|2,4,6|\n|8,10,12|\n|14,16,18|\n""" , str(a * 2 ) )
def __a ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual("""|1,2,5|\n|2,4,5|\n|6,7,8|\n""" , str(_lowercase ) )
def __a ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(7 , a.component(2 , 1 ) , 0.01 )
def __a ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
SCREAMING_SNAKE_CASE__ = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("""|2,4,10|\n|4,8,10|\n|12,14,18|\n""" , str(a + b ) )
def __a ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
SCREAMING_SNAKE_CASE__ = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("""|0,0,-4|\n|0,0,0|\n|0,0,-2|\n""" , str(a - b ) )
def __a ( self : Any ):
"""simple docstring"""
self.assertEqual(
"""|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n""" , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main()
| 204 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase ( A__ , unittest.TestCase ):
"""simple docstring"""
_a = CLIPTokenizer
_a = CLIPTokenizerFast
_a = True
_a = {}
_a = False
def lowerCAmelCase__ ( self ):
'''simple docstring'''
super().setUp()
# fmt: off
UpperCamelCase__ :Any = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
# fmt: on
UpperCamelCase__ :Optional[int] = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
UpperCamelCase__ :str = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''']
UpperCamelCase__ :Optional[Any] = {'''unk_token''': '''<unk>'''}
UpperCamelCase__ :List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCamelCase__ :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(UpperCamelCase_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(UpperCamelCase_ ) )
def lowerCAmelCase__ ( self , **UpperCamelCase_ ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCAmelCase__ ( self , **UpperCamelCase_ ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = '''lower newer'''
UpperCamelCase__ :Union[str, Any] = '''lower newer'''
return input_text, output_text
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :int = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
UpperCamelCase__ :Union[str, Any] = '''lower newer'''
UpperCamelCase__ :str = ['''lo''', '''w''', '''er</w>''', '''n''', '''e''', '''w''', '''er</w>''']
UpperCamelCase__ :str = tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :Dict = tokens + [tokenizer.unk_token]
UpperCamelCase__ :Tuple = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , UpperCamelCase_ )
@require_ftfy
def lowerCAmelCase__ ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCamelCase__ :int = self.tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
UpperCamelCase__ :List[str] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
UpperCamelCase__ :Any = '''A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'''
UpperCamelCase__ :Union[str, Any] = tokenizer_s.tokenize(UpperCamelCase_ )
UpperCamelCase__ :List[Any] = tokenizer_r.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
UpperCamelCase__ :Tuple = '''xa\u0303y''' + ''' ''' + '''x\xe3y'''
UpperCamelCase__ :Union[str, Any] = tokenizer_s.tokenize(UpperCamelCase_ )
UpperCamelCase__ :Any = tokenizer_r.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
# Test that the tokenization is identical on unicode of space type
UpperCamelCase__ :List[str] = [
'''\u0009''', # (horizontal tab, '\t')
'''\u000B''', # (vertical tab)
'''\u000C''', # (form feed)
'''\u0020''', # (space, ' ')
'''\u200E''', # (left-to-right mark):w
'''\u200F''', # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
UpperCamelCase__ :Tuple = tokenizer_s.tokenize(UpperCamelCase_ )
UpperCamelCase__ :List[str] = tokenizer_r.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
# Test that the tokenization is identical on unicode of line break type
UpperCamelCase__ :Optional[Any] = [
'''\u000A''', # (line feed, '\n')
'''\r\n''', # (carriage return and line feed, '\r\n')
'''\u000D''', # (carriage return, '\r')
'''\r''', # (carriage return, '\r')
'''\u000D''', # (carriage return, '\r')
'''\u2028''', # (line separator)
'''\u2029''', # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
UpperCamelCase__ :Dict = tokenizer_s.tokenize(UpperCamelCase_ )
UpperCamelCase__ :List[str] = tokenizer_r.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCamelCase__ :str = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
UpperCamelCase__ :Tuple = F'''{text_of_1_token} {text_of_1_token}'''
UpperCamelCase__ :Tuple = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase_ , use_fast=UpperCamelCase_ , )
UpperCamelCase__ :Tuple = tokenizer_r(UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase_ ) + 1, len(UpperCamelCase_ ) + 1 + len(UpperCamelCase_ )) , )
UpperCamelCase__ :List[Any] = F''' {text}'''
UpperCamelCase__ :List[Any] = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase_ , use_fast=UpperCamelCase_ , )
UpperCamelCase__ :List[Any] = tokenizer_r(UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(UpperCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase_ ) + 1, 1 + len(UpperCamelCase_ ) + 1 + len(UpperCamelCase_ )) , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
with self.assertRaises(UpperCamelCase_ ) as context:
self.rust_tokenizer_class.from_pretrained('''robot-test/old-clip-tokenizer''' )
self.assertTrue(
context.exception.args[0].startswith(
'''The `backend_tokenizer` provided does not match the expected format.''' ) )
@require_ftfy
def lowerCAmelCase__ ( self ):
'''simple docstring'''
super().test_tokenization_python_rust_equals()
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass | 97 |
'''simple docstring'''
from pathlib import Path
import fire
from tqdm import tqdm
def a ( __a="ro" , __a="en" , __a="wmt16" , __a=None ) -> None:
'''simple docstring'''
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError('''run pip install datasets''' )
UpperCamelCase__ :int = f'''{src_lang}-{tgt_lang}'''
print(f'''Converting {dataset}-{pair}''' )
UpperCamelCase__ :Tuple = datasets.load_dataset(__a , __a )
if save_dir is None:
UpperCamelCase__ :Any = f'''{dataset}-{pair}'''
UpperCamelCase__ :Dict = Path(__a )
save_dir.mkdir(exist_ok=__a )
for split in ds.keys():
print(f'''Splitting {split} with {ds[split].num_rows} records''' )
# to save to val.source, val.target like summary datasets
UpperCamelCase__ :Dict = '''val''' if split == '''validation''' else split
UpperCamelCase__ :List[Any] = save_dir.joinpath(f'''{fn}.source''' )
UpperCamelCase__ :int = save_dir.joinpath(f'''{fn}.target''' )
UpperCamelCase__ :Union[str, Any] = src_path.open('''w+''' )
UpperCamelCase__ :Tuple = tgt_path.open('''w+''' )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
UpperCamelCase__ :Union[str, Any] = x['''translation''']
src_fp.write(ex[src_lang] + '''\n''' )
tgt_fp.write(ex[tgt_lang] + '''\n''' )
print(f'''Saved {dataset} dataset to {save_dir}''' )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset) | 97 | 1 |
import math
def __A ( lowerCAmelCase_ = 100 ):
_UpperCAmelCase : int = sum(i * i for i in range(1 , n + 1 ) )
_UpperCAmelCase : int = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F"{solution() = }")
| 369 |
'''simple docstring'''
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
lowerCAmelCase_ : Optional[Any] = 10
def __A ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
for i in range(lowerCAmelCase_ , lowerCAmelCase_ ):
if array[i] == target:
return i
return -1
def __A ( lowerCAmelCase_ , lowerCAmelCase_ ):
_UpperCAmelCase : Optional[int] = 0
_UpperCAmelCase : str = len(lowerCAmelCase_ )
while left <= right:
if right - left < precision:
return lin_search(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_UpperCAmelCase : Tuple = (left + right) // 3 + 1
_UpperCAmelCase : str = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
_UpperCAmelCase : List[Any] = one_third - 1
elif array[two_third] < target:
_UpperCAmelCase : Optional[Any] = two_third + 1
else:
_UpperCAmelCase : Dict = one_third + 1
_UpperCAmelCase : List[Any] = two_third - 1
else:
return -1
def __A ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
if left < right:
if right - left < precision:
return lin_search(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_UpperCAmelCase : List[Any] = (left + right) // 3 + 1
_UpperCAmelCase : int = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
return rec_ternary_search(lowerCAmelCase_ , one_third - 1 , lowerCAmelCase_ , lowerCAmelCase_ )
elif array[two_third] < target:
return rec_ternary_search(two_third + 1 , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
else:
return rec_ternary_search(one_third + 1 , two_third - 1 , lowerCAmelCase_ , lowerCAmelCase_ )
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase_ : int = input('''Enter numbers separated by comma:\n''').strip()
lowerCAmelCase_ : Tuple = [int(item.strip()) for item in user_input.split(''',''')]
assert collection == sorted(collection), F"List must be ordered.\n{collection}."
lowerCAmelCase_ : Any = int(input('''Enter the number to be found in the list:\n''').strip())
lowerCAmelCase_ : List[str] = ite_ternary_search(collection, target)
lowerCAmelCase_ : int = rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1:
print(F"Iterative search: {target} found at positions: {resulta}")
print(F"Recursive search: {target} found at positions: {resulta}")
else:
print('''Not found''')
| 170 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class _snake_case ( _lowercase ):
lowerCamelCase__: Optional[Any] = "Salesforce/blip-image-captioning-base"
lowerCamelCase__: List[Any] = (
"This is a tool that generates a description of an image. It takes an input named `image` which should be the "
"image to caption, and returns a text that contains the description in English."
)
lowerCamelCase__: Union[str, Any] = "image_captioner"
lowerCamelCase__: List[str] = AutoModelForVisionaSeq
lowerCamelCase__: Union[str, Any] = ["image"]
lowerCamelCase__: Dict = ["text"]
def __init__( self: int , *__lowerCamelCase: Optional[Any] , **__lowerCamelCase: int ) -> List[Any]:
requires_backends(self , ["vision"] )
super().__init__(*__lowerCamelCase , **__lowerCamelCase )
def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: "Image" ) -> Union[str, Any]:
return self.pre_processor(images=__lowerCamelCase , return_tensors="pt" )
def _lowerCamelCase ( self: Any , __lowerCamelCase: Any ) -> Optional[Any]:
return self.model.generate(**__lowerCamelCase )
def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: Union[str, Any] ) -> Any:
return self.pre_processor.batch_decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )[0].strip()
| 157 | import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class _snake_case ( unittest.TestCase ):
def _lowerCamelCase ( self: Any ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowerCamelCase ( self: Dict ) -> int:
__UpperCAmelCase : Any = 1
__UpperCAmelCase : List[Any] = 3
__UpperCAmelCase : Optional[int] = (32, 32)
__UpperCAmelCase : str = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__lowerCamelCase )
return image
@property
def _lowerCamelCase ( self: List[Any] ) -> str:
torch.manual_seed(0 )
__UpperCAmelCase : int = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
def _lowerCamelCase ( self: List[str] ) -> int:
torch.manual_seed(0 )
__UpperCAmelCase : int = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def _lowerCamelCase ( self: int ) -> List[str]:
torch.manual_seed(0 )
__UpperCAmelCase : Optional[int] = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_06 , )
return RobertaSeriesModelWithTransformation(__lowerCamelCase )
@property
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[Any]:
def extract(*__lowerCamelCase: List[str] , **__lowerCamelCase: Optional[int] ):
class _snake_case :
def __init__( self: List[str] ) -> int:
__UpperCAmelCase : Tuple = torch.ones([0] )
def _lowerCamelCase ( self: int , __lowerCamelCase: Optional[int] ) -> int:
self.pixel_values.to(__lowerCamelCase )
return self
return Out()
return extract
def _lowerCamelCase ( self: Dict ) -> Union[str, Any]:
__UpperCAmelCase : List[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase : List[Any] = self.dummy_cond_unet
__UpperCAmelCase : int = PNDMScheduler(skip_prk_steps=__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = self.dummy_vae
__UpperCAmelCase : List[str] = self.dummy_text_encoder
__UpperCAmelCase : Tuple = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
__UpperCAmelCase : int = 77
__UpperCAmelCase : List[str] = self.dummy_image.to(__lowerCamelCase )
__UpperCAmelCase : Dict = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
__UpperCAmelCase : Optional[Any] = AltDiffusionImgaImgPipeline(
unet=__lowerCamelCase , scheduler=__lowerCamelCase , vae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , safety_checker=__lowerCamelCase , feature_extractor=self.dummy_extractor , )
__UpperCAmelCase : Union[str, Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__lowerCamelCase )
__UpperCAmelCase : List[str] = alt_pipe.to(__lowerCamelCase )
alt_pipe.set_progress_bar_config(disable=__lowerCamelCase )
__UpperCAmelCase : Optional[int] = "A painting of a squirrel eating a burger"
__UpperCAmelCase : Optional[int] = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
__UpperCAmelCase : List[Any] = alt_pipe(
[prompt] , generator=__lowerCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=__lowerCamelCase , )
__UpperCAmelCase : List[Any] = output.images
__UpperCAmelCase : Tuple = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
__UpperCAmelCase : Tuple = alt_pipe(
[prompt] , generator=__lowerCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=__lowerCamelCase , return_dict=__lowerCamelCase , )[0]
__UpperCAmelCase : str = image[0, -3:, -3:, -1]
__UpperCAmelCase : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__UpperCAmelCase : Tuple = np.array([0.44_27, 0.37_31, 0.42_49, 0.49_41, 0.45_46, 0.41_48, 0.41_93, 0.46_66, 0.44_99] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _lowerCamelCase ( self: Tuple ) -> Optional[int]:
__UpperCAmelCase : List[Any] = self.dummy_cond_unet
__UpperCAmelCase : List[Any] = PNDMScheduler(skip_prk_steps=__lowerCamelCase )
__UpperCAmelCase : Any = self.dummy_vae
__UpperCAmelCase : Tuple = self.dummy_text_encoder
__UpperCAmelCase : Optional[Any] = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
__UpperCAmelCase : Dict = 77
__UpperCAmelCase : Tuple = self.dummy_image.to(__lowerCamelCase )
# put models in fp16
__UpperCAmelCase : Any = unet.half()
__UpperCAmelCase : Optional[Any] = vae.half()
__UpperCAmelCase : Optional[int] = bert.half()
# make sure here that pndm scheduler skips prk
__UpperCAmelCase : List[Any] = AltDiffusionImgaImgPipeline(
unet=__lowerCamelCase , scheduler=__lowerCamelCase , vae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , safety_checker=__lowerCamelCase , feature_extractor=self.dummy_extractor , )
__UpperCAmelCase : Any = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__lowerCamelCase )
__UpperCAmelCase : Any = alt_pipe.to(__lowerCamelCase )
alt_pipe.set_progress_bar_config(disable=__lowerCamelCase )
__UpperCAmelCase : int = "A painting of a squirrel eating a burger"
__UpperCAmelCase : Optional[Any] = torch.manual_seed(0 )
__UpperCAmelCase : Any = alt_pipe(
[prompt] , generator=__lowerCamelCase , num_inference_steps=2 , output_type="np" , image=__lowerCamelCase , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _lowerCamelCase ( self: int ) -> List[str]:
__UpperCAmelCase : int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
# resize to resolution that is divisible by 8 but not 16 or 32
__UpperCAmelCase : List[Any] = init_image.resize((7_60, 5_04) )
__UpperCAmelCase : Any = "BAAI/AltDiffusion"
__UpperCAmelCase : Union[str, Any] = AltDiffusionImgaImgPipeline.from_pretrained(
__lowerCamelCase , safety_checker=__lowerCamelCase , )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
pipe.enable_attention_slicing()
__UpperCAmelCase : str = "A fantasy landscape, trending on artstation"
__UpperCAmelCase : Union[str, Any] = torch.manual_seed(0 )
__UpperCAmelCase : List[Any] = pipe(
prompt=__lowerCamelCase , image=__lowerCamelCase , strength=0.75 , guidance_scale=7.5 , generator=__lowerCamelCase , output_type="np" , )
__UpperCAmelCase : List[Any] = output.images[0]
__UpperCAmelCase : str = image[2_55:2_58, 3_83:3_86, -1]
assert image.shape == (5_04, 7_60, 3)
__UpperCAmelCase : int = np.array([0.93_58, 0.93_97, 0.95_99, 0.99_01, 1.00_00, 1.00_00, 0.98_82, 1.00_00, 1.00_00] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
def _lowerCamelCase ( self: Union[str, Any] ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self: List[Any] ) -> List[Any]:
__UpperCAmelCase : List[str] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
__UpperCAmelCase : Union[str, Any] = init_image.resize((7_68, 5_12) )
__UpperCAmelCase : Any = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
__UpperCAmelCase : Union[str, Any] = "BAAI/AltDiffusion"
__UpperCAmelCase : Optional[Any] = AltDiffusionImgaImgPipeline.from_pretrained(
__lowerCamelCase , safety_checker=__lowerCamelCase , )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
pipe.enable_attention_slicing()
__UpperCAmelCase : List[Any] = "A fantasy landscape, trending on artstation"
__UpperCAmelCase : Union[str, Any] = torch.manual_seed(0 )
__UpperCAmelCase : str = pipe(
prompt=__lowerCamelCase , image=__lowerCamelCase , strength=0.75 , guidance_scale=7.5 , generator=__lowerCamelCase , output_type="np" , )
__UpperCAmelCase : str = output.images[0]
assert image.shape == (5_12, 7_68, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1e-2
| 157 | 1 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
_snake_case : List[str] = pytest.mark.integration
@pytest.mark.parametrize("path" , ["paws", "csv"] )
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase ):
inspect_dataset(__lowerCamelCase , __lowerCamelCase )
__snake_case : Union[str, Any] = path + ".py"
assert script_name in os.listdir(__lowerCamelCase )
assert "__pycache__" not in os.listdir(__lowerCamelCase )
@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.parametrize("path" , ["accuracy"] )
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase ):
inspect_metric(__lowerCamelCase , __lowerCamelCase )
__snake_case : Any = path + ".py"
assert script_name in os.listdir(__lowerCamelCase )
assert "__pycache__" not in os.listdir(__lowerCamelCase )
@pytest.mark.parametrize(
"path, config_name, expected_splits" , [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] , )
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
__snake_case : List[str] = get_dataset_config_info(__lowerCamelCase , config_name=__lowerCamelCase )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" , [
("paws", None, ValueError),
] , )
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
with pytest.raises(__lowerCamelCase ):
get_dataset_config_info(__lowerCamelCase , config_name=__lowerCamelCase )
@pytest.mark.parametrize(
"path, expected" , [
("squad", "plain_text"),
("acronym_identification", "default"),
("lhoestq/squad", "plain_text"),
("lhoestq/test", "default"),
("lhoestq/demo1", "lhoestq--demo1"),
("dalle-mini/wit", "dalle-mini--wit"),
] , )
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase ):
__snake_case : Tuple = get_dataset_config_names(__lowerCamelCase )
assert expected in config_names
@pytest.mark.parametrize(
"path, expected_configs, expected_splits_in_first_config" , [
("squad", ["plain_text"], ["train", "validation"]),
("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
] , )
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
__snake_case : Optional[Any] = get_dataset_infos(__lowerCamelCase )
assert list(infos.keys() ) == expected_configs
__snake_case : Tuple = expected_configs[0]
assert expected_config in infos
__snake_case : Optional[int] = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
"path, expected_config, expected_splits" , [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] , )
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
__snake_case : Tuple = get_dataset_infos(__lowerCamelCase )
assert expected_config in infos
__snake_case : Any = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" , [
("paws", None, ValueError),
] , )
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
with pytest.raises(__lowerCamelCase ):
get_dataset_split_names(__lowerCamelCase , config_name=__lowerCamelCase )
| 134 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case : Any = logging.get_logger(__name__)
_snake_case : Dict = {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class a (_lowerCAmelCase ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = "speech_to_text_2"
__UpperCAmelCase : Dict = ["past_key_values"]
__UpperCAmelCase : str = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : str , lowerCamelCase : List[Any]=10000 , lowerCamelCase : Tuple=6 , lowerCamelCase : Optional[Any]=2048 , lowerCamelCase : Optional[int]=4 , lowerCamelCase : Optional[int]=0.0 , lowerCamelCase : str=True , lowerCamelCase : str="relu" , lowerCamelCase : Tuple=256 , lowerCamelCase : Union[str, Any]=0.1 , lowerCamelCase : Optional[int]=0.0 , lowerCamelCase : Any=0.0 , lowerCamelCase : Optional[int]=0.02 , lowerCamelCase : Dict=2 , lowerCamelCase : List[str]=True , lowerCamelCase : str=1 , lowerCamelCase : Any=0 , lowerCamelCase : List[Any]=2 , lowerCamelCase : List[Any]=1024 , **lowerCamelCase : List[str] , ) -> Any:
__snake_case : Tuple = vocab_size
__snake_case : int = d_model
__snake_case : List[str] = decoder_ffn_dim
__snake_case : Union[str, Any] = decoder_layers
__snake_case : Dict = decoder_attention_heads
__snake_case : Any = dropout
__snake_case : List[str] = attention_dropout
__snake_case : Optional[Any] = activation_dropout
__snake_case : Union[str, Any] = activation_function
__snake_case : Union[str, Any] = init_std
__snake_case : Union[str, Any] = decoder_layerdrop
__snake_case : Optional[int] = use_cache
__snake_case : Optional[int] = decoder_layers
__snake_case : str = scale_embedding # scale factor will be sqrt(d_model) if True
__snake_case : Dict = max_target_positions
super().__init__(
pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , decoder_start_token_id=lowerCamelCase , **lowerCamelCase , )
| 134 | 1 |
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class UpperCAmelCase ( _UpperCAmelCase ):
def __init__(self : int , snake_case__ : List[Any] = None , snake_case__ : str = None , snake_case__ : List[Any] = None , snake_case__ : Optional[Any] = None , snake_case__ : Tuple = False , snake_case__ : int = False , snake_case__ : List[str] = None , **snake_case__ : Optional[int] , ) -> Tuple:
'''simple docstring'''
snake_case : Union[str, Any] = path_or_paths
snake_case : Tuple = split if split or isinstance(snake_case__ , snake_case__ ) else "train"
snake_case : int = features
snake_case : Tuple = cache_dir
snake_case : List[Any] = keep_in_memory
snake_case : int = streaming
snake_case : List[Any] = num_proc
snake_case : Any = kwargs
@abstractmethod
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
'''simple docstring'''
pass
class UpperCAmelCase ( _UpperCAmelCase ):
def __init__(self : Union[str, Any] , snake_case__ : Tuple = None , snake_case__ : Any = None , snake_case__ : str = False , snake_case__ : Optional[Any] = False , snake_case__ : Tuple = None , **snake_case__ : Optional[Any] , ) -> str:
'''simple docstring'''
snake_case : List[str] = features
snake_case : List[str] = cache_dir
snake_case : Any = keep_in_memory
snake_case : int = streaming
snake_case : Dict = num_proc
snake_case : Union[str, Any] = kwargs
@abstractmethod
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> Union[Dataset, IterableDataset]:
'''simple docstring'''
pass
| 59 |
"""simple docstring"""
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE__ = "▁"
SCREAMING_SNAKE_CASE__ = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class lowercase ( _UpperCAmelCase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE = BigBirdTokenizer
_SCREAMING_SNAKE_CASE = BigBirdTokenizerFast
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = True
def _snake_case ( self ) -> List[str]:
super().setUp()
lowerCAmelCase = self.tokenizer_class(lowercase , keep_accents=lowercase )
tokenizer.save_pretrained(self.tmpdirname )
def _snake_case ( self ) -> List[Any]:
lowerCAmelCase = """<s>"""
lowerCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase ) , lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase ) , lowercase )
def _snake_case ( self ) -> List[str]:
lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """[MASK]""" )
self.assertEqual(len(lowercase ) , 1_004 )
def _snake_case ( self ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
def _snake_case ( self ) -> List[str]:
if not self.test_rust_tokenizer:
return
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_rust_tokenizer()
lowerCAmelCase = """I was born in 92000, and this is falsé."""
lowerCAmelCase = tokenizer.tokenize(lowercase )
lowerCAmelCase = rust_tokenizer.tokenize(lowercase )
self.assertListEqual(lowercase , lowercase )
lowerCAmelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
lowerCAmelCase = rust_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
lowerCAmelCase = self.get_rust_tokenizer()
lowerCAmelCase = tokenizer.encode(lowercase )
lowerCAmelCase = rust_tokenizer.encode(lowercase )
self.assertListEqual(lowercase , lowercase )
def _snake_case ( self ) -> int:
lowerCAmelCase = BigBirdTokenizer(lowercase , keep_accents=lowercase )
lowerCAmelCase = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowercase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase ) , [285, 46, 10, 170, 382] , )
lowerCAmelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowercase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowerCAmelCase = tokenizer.convert_tokens_to_ids(lowercase )
self.assertListEqual(
lowercase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
lowerCAmelCase = tokenizer.convert_ids_to_tokens(lowercase )
self.assertListEqual(
lowercase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def _snake_case ( self ) -> Tuple:
return BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""" )
@slow
def _snake_case ( self ) -> Tuple:
lowerCAmelCase = """Hello World!"""
lowerCAmelCase = [65, 18_536, 2_260, 101, 66]
self.assertListEqual(lowercase , self.big_tokenizer.encode(lowercase ) )
@slow
def _snake_case ( self ) -> int:
lowerCAmelCase = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
# fmt: off
lowerCAmelCase = [65, 871, 419, 358, 946, 991, 2_521, 452, 358, 1_357, 387, 7_751, 3_536, 112, 985, 456, 126, 865, 938, 5_400, 5_734, 458, 1_368, 467, 786, 2_462, 5_246, 1_159, 633, 865, 4_519, 457, 582, 852, 2_557, 427, 916, 508, 405, 34_324, 497, 391, 408, 11_342, 1_244, 385, 100, 938, 985, 456, 574, 362, 12_597, 3_200, 3_129, 1_172, 66] # noqa: E231
# fmt: on
self.assertListEqual(lowercase , self.big_tokenizer.encode(lowercase ) )
@require_torch
@slow
def _snake_case ( self ) -> Tuple:
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
lowerCAmelCase = list(self.big_tokenizer.get_vocab().keys() )[:10]
lowerCAmelCase = """ """.join(lowercase )
lowerCAmelCase = self.big_tokenizer.encode_plus(lowercase , return_tensors="""pt""" , return_token_type_ids=lowercase )
lowerCAmelCase = self.big_tokenizer.batch_encode_plus(
[sequence + """ """ + sequence] , return_tensors="""pt""" , return_token_type_ids=lowercase )
lowerCAmelCase = BigBirdConfig(attention_type="""original_full""" )
lowerCAmelCase = BigBirdModel(lowercase )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**lowercase )
model(**lowercase )
@slow
def _snake_case ( self ) -> List[str]:
lowerCAmelCase = BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""" )
lowerCAmelCase = tokenizer.decode(tokenizer("""Paris is the [MASK].""" ).input_ids )
self.assertTrue(decoded_text == """[CLS] Paris is the[MASK].[SEP]""" )
@slow
def _snake_case ( self ) -> Optional[int]:
# fmt: off
lowerCAmelCase = {"""input_ids""": [[65, 39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114, 66], [65, 448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase , model_name="""google/bigbird-roberta-base""" , revision="""215c99f1600e06f83acce68422f2035b2b5c3510""" , )
| 46 | 0 |
'''simple docstring'''
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase_ = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''')
class _UpperCAmelCase ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
snake_case = BartphoTokenizer
snake_case = False
snake_case = True
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
super().setUp()
_A = ["▁This", "▁is", "▁a", "▁t", "est"]
_A = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
_A = {"unk_token": "<unk>"}
_A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["monolingual_vocab_file"] )
with open(self.monolingual_vocab_file , "w" , encoding="utf-8" ) as fp:
for token in vocab_tokens:
fp.write(f'''{token} {vocab_tokens[token]}\n''' )
_A = BartphoTokenizer(__UpperCAmelCase , self.monolingual_vocab_file , **self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase ( self : Dict , **__UpperCAmelCase : Optional[Any] ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : str ):
'''simple docstring'''
_A = "This is a là test"
_A = "This is a<unk><unk> test"
return input_text, output_text
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_A = BartphoTokenizer(__UpperCAmelCase , self.monolingual_vocab_file , **self.special_tokens_map )
_A = "This is a là test"
_A = "▁This ▁is ▁a ▁l à ▁t est".split()
_A = tokenizer.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
_A = tokens + [tokenizer.unk_token]
_A = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , __UpperCAmelCase )
| 174 |
'''simple docstring'''
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def __lowercase ( __lowercase ) -> str:
'''simple docstring'''
return {key.lstrip("-" ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
def __lowercase ( ) -> Tuple:
'''simple docstring'''
_A = ArgumentParser(
"HuggingFace Datasets CLI tool" , usage="datasets-cli <command> [<args>]" , allow_abbrev=__lowercase )
_A = parser.add_subparsers(help="datasets-cli command helpers" )
set_verbosity_info()
# Register commands
ConvertCommand.register_subcommand(__lowercase )
EnvironmentCommand.register_subcommand(__lowercase )
TestCommand.register_subcommand(__lowercase )
RunBeamCommand.register_subcommand(__lowercase )
DummyDataCommand.register_subcommand(__lowercase )
# Parse args
_A , _A = parser.parse_known_args()
if not hasattr(__lowercase , "func" ):
parser.print_help()
exit(1 )
_A = parse_unknown_args(__lowercase )
# Run
_A = args.func(__lowercase , **__lowercase )
service.run()
if __name__ == "__main__":
main()
| 174 | 1 |
from __future__ import annotations
from typing import Any
class __lowercase :
"""simple docstring"""
def __init__( self , A = 6 ) -> None:
'''simple docstring'''
lowerCamelCase = None
lowerCamelCase = None
self.create_linked_list(_A )
def __A ( self , A ) -> None:
'''simple docstring'''
lowerCamelCase = Node()
lowerCamelCase = current_node
lowerCamelCase = current_node
lowerCamelCase = current_node
for _ in range(1 , _A ):
lowerCamelCase = Node()
lowerCamelCase = current_node
lowerCamelCase = previous_node
lowerCamelCase = current_node
lowerCamelCase = self.front
lowerCamelCase = previous_node
def __A ( self ) -> bool:
'''simple docstring'''
return (
self.front == self.rear
and self.front is not None
and self.front.data is None
)
def __A ( self ) -> Any | None:
'''simple docstring'''
self.check_can_perform_operation()
return self.front.data if self.front else None
def __A ( self , A ) -> None:
'''simple docstring'''
if self.rear is None:
return
self.check_is_full()
if not self.is_empty():
lowerCamelCase = self.rear.next
if self.rear:
lowerCamelCase = data
def __A ( self ) -> Any:
'''simple docstring'''
self.check_can_perform_operation()
if self.rear is None or self.front is None:
return None
if self.front == self.rear:
lowerCamelCase = self.front.data
lowerCamelCase = None
return data
lowerCamelCase = self.front
lowerCamelCase = old_front.next
lowerCamelCase = old_front.data
lowerCamelCase = None
return data
def __A ( self ) -> None:
'''simple docstring'''
if self.is_empty():
raise Exception("""Empty Queue""" )
def __A ( self ) -> None:
'''simple docstring'''
if self.rear and self.rear.next == self.front:
raise Exception("""Full Queue""" )
class __lowercase :
"""simple docstring"""
def __init__( self ) -> None:
'''simple docstring'''
lowerCamelCase = None
lowerCamelCase = None
lowerCamelCase = None
if __name__ == "__main__":
import doctest
doctest.testmod()
| 252 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
lowerCamelCase__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCamelCase__ : Union[str, Any] = '''
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior.to("cuda")
>>> prompt = "A red cartoon frog, 4k"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
... )
>>> pipe.to("cuda")
>>> init_image = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/frog.png"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save("red_frog.png")
```
'''
def UpperCamelCase ( _lowerCAmelCase : int, _lowerCAmelCase : List[Any], _lowerCAmelCase : Optional[Any]=8 ) -> Union[str, Any]:
_UpperCAmelCase : Dict = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
_UpperCAmelCase : Tuple = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
def UpperCamelCase ( _lowerCAmelCase : Union[str, Any], _lowerCAmelCase : Any=512, _lowerCAmelCase : Optional[Any]=512 ) -> Optional[Any]:
_UpperCAmelCase : Optional[int] = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1 )
_UpperCAmelCase : List[Any] = np.array(pil_image.convert("""RGB""" ) )
_UpperCAmelCase : str = arr.astype(np.floataa ) / 127.5 - 1
_UpperCAmelCase : Dict = np.transpose(_lowerCAmelCase, [2, 0, 1] )
_UpperCAmelCase : Union[str, Any] = torch.from_numpy(_lowerCAmelCase ).unsqueeze(0 )
return image
class _UpperCAmelCase ( __a):
def __init__( self , _A , _A , _A , ) -> int:
'''simple docstring'''
super().__init__()
self.register_modules(
unet=_A , scheduler=_A , movq=_A , )
_UpperCAmelCase : Optional[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __snake_case ( self , _A , _A , _A ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase : int = min(int(num_inference_steps * strength ) , _A )
_UpperCAmelCase : Dict = max(num_inference_steps - init_timestep , 0 )
_UpperCAmelCase : Tuple = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def __snake_case ( self , _A , _A , _A , _A , _A , _A , _A=None ) -> List[Any]:
'''simple docstring'''
if not isinstance(_A , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_A )}''' )
_UpperCAmelCase : Any = image.to(device=_A , dtype=_A )
_UpperCAmelCase : Optional[int] = batch_size * num_images_per_prompt
if image.shape[1] == 4:
_UpperCAmelCase : Dict = image
else:
if isinstance(_A , _A ) and len(_A ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(_A )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
elif isinstance(_A , _A ):
_UpperCAmelCase : Any = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_A )
]
_UpperCAmelCase : List[Any] = torch.cat(_A , dim=0 )
else:
_UpperCAmelCase : str = self.movq.encode(_A ).latent_dist.sample(_A )
_UpperCAmelCase : Any = self.movq.config.scaling_factor * init_latents
_UpperCAmelCase : List[Any] = torch.cat([init_latents] , dim=0 )
_UpperCAmelCase : Union[str, Any] = init_latents.shape
_UpperCAmelCase : List[Any] = randn_tensor(_A , generator=_A , device=_A , dtype=_A )
# get latents
_UpperCAmelCase : Optional[int] = self.scheduler.add_noise(_A , _A , _A )
_UpperCAmelCase : Optional[int] = init_latents
return latents
def __snake_case ( self , _A=0 ) -> Optional[Any]:
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
_UpperCAmelCase : List[Any] = torch.device(f'''cuda:{gpu_id}''' )
_UpperCAmelCase : Tuple = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_A , _A )
def __snake_case ( self , _A=0 ) -> int:
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
_UpperCAmelCase : int = torch.device(f'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=_A )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_UpperCAmelCase : Any = None
for cpu_offloaded_model in [self.unet, self.movq]:
_UpperCAmelCase , _UpperCAmelCase : Tuple = cpu_offload_with_hook(_A , _A , prev_module_hook=_A )
# We'll offload the last model manually.
_UpperCAmelCase : Optional[int] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __snake_case ( self ) -> List[str]:
'''simple docstring'''
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(_A , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_A )
def __call__( self , _A , _A , _A , _A = 5_12 , _A = 5_12 , _A = 1_00 , _A = 4.0 , _A = 0.3 , _A = 1 , _A = None , _A = "pil" , _A = True , ) -> Any:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = self._execution_device
_UpperCAmelCase : List[str] = guidance_scale > 1.0
if isinstance(_A , _A ):
_UpperCAmelCase : Dict = torch.cat(_A , dim=0 )
_UpperCAmelCase : Any = image_embeds.shape[0]
if isinstance(_A , _A ):
_UpperCAmelCase : Any = torch.cat(_A , dim=0 )
if do_classifier_free_guidance:
_UpperCAmelCase : str = image_embeds.repeat_interleave(_A , dim=0 )
_UpperCAmelCase : Optional[int] = negative_image_embeds.repeat_interleave(_A , dim=0 )
_UpperCAmelCase : str = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_A )
if not isinstance(_A , _A ):
_UpperCAmelCase : str = [image]
if not all(isinstance(_A , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
f'''Input is in incorrect format: {[type(_A ) for i in image]}. Currently, we only support PIL image and pytorch tensor''' )
_UpperCAmelCase : Union[str, Any] = torch.cat([prepare_image(_A , _A , _A ) for i in image] , dim=0 )
_UpperCAmelCase : List[Any] = image.to(dtype=image_embeds.dtype , device=_A )
_UpperCAmelCase : int = self.movq.encode(_A )["""latents"""]
_UpperCAmelCase : Dict = latents.repeat_interleave(_A , dim=0 )
self.scheduler.set_timesteps(_A , device=_A )
_UpperCAmelCase , _UpperCAmelCase : Any = self.get_timesteps(_A , _A , _A )
_UpperCAmelCase : Dict = timesteps[:1].repeat(batch_size * num_images_per_prompt )
_UpperCAmelCase , _UpperCAmelCase : str = downscale_height_and_width(_A , _A , self.movq_scale_factor )
_UpperCAmelCase : List[Any] = self.prepare_latents(
_A , _A , _A , _A , image_embeds.dtype , _A , _A )
for i, t in enumerate(self.progress_bar(_A ) ):
# expand the latents if we are doing classifier free guidance
_UpperCAmelCase : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_UpperCAmelCase : Union[str, Any] = {"""image_embeds""": image_embeds}
_UpperCAmelCase : str = self.unet(
sample=_A , timestep=_A , encoder_hidden_states=_A , added_cond_kwargs=_A , return_dict=_A , )[0]
if do_classifier_free_guidance:
_UpperCAmelCase , _UpperCAmelCase : Any = noise_pred.split(latents.shape[1] , dim=1 )
_UpperCAmelCase , _UpperCAmelCase : Any = noise_pred.chunk(2 )
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = variance_pred.chunk(2 )
_UpperCAmelCase : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_UpperCAmelCase : Optional[int] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_UpperCAmelCase , _UpperCAmelCase : Dict = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_UpperCAmelCase : List[Any] = self.scheduler.step(
_A , _A , _A , generator=_A , )[0]
# post-processing
_UpperCAmelCase : Optional[int] = self.movq.decode(_A , force_not_quantize=_A )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
if output_type in ["np", "pil"]:
_UpperCAmelCase : Any = image * 0.5 + 0.5
_UpperCAmelCase : Dict = image.clamp(0 , 1 )
_UpperCAmelCase : str = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_UpperCAmelCase : List[str] = self.numpy_to_pil(_A )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_A )
| 246 | 0 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class SCREAMING_SNAKE_CASE__ (__snake_case , unittest.TestCase ):
__lowerCamelCase : List[str] = CTRLTokenizer
__lowerCamelCase : Union[str, Any] = False
__lowerCamelCase : List[str] = False
def snake_case_ ( self):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase__ : Tuple = ['adapt', 're@@', 'a@@', 'apt', 'c@@', 't', '<unk>']
lowercase__ : str = dict(zip(a , range(len(a))))
lowercase__ : Optional[int] = ['#version: 0.2', 'a p', 'ap t</w>', 'r e', 'a d', 'ad apt</w>', '']
lowercase__ : Dict = {'unk_token': '<unk>'}
lowercase__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
lowercase__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(a) + '\n')
with open(self.merges_file , 'w' , encoding='utf-8') as fp:
fp.write('\n'.join(a))
def snake_case_ ( self , **a):
kwargs.update(self.special_tokens_map)
return CTRLTokenizer.from_pretrained(self.tmpdirname , **a)
def snake_case_ ( self , a):
lowercase__ : Optional[Any] = 'adapt react readapt apt'
lowercase__ : Dict = 'adapt react readapt apt'
return input_text, output_text
def snake_case_ ( self):
lowercase__ : Union[str, Any] = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map)
lowercase__ : Optional[Any] = 'adapt react readapt apt'
lowercase__ : Optional[int] = 'adapt re@@ a@@ c@@ t re@@ adapt apt'.split()
lowercase__ : List[str] = tokenizer.tokenize(a)
self.assertListEqual(a , a)
lowercase__ : Dict = tokens + [tokenizer.unk_token]
lowercase__ : Optional[int] = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a) , a)
| 369 |
def snake_case__ ( SCREAMING_SNAKE_CASE_ : float ):
'''simple docstring'''
if edge <= 0 or not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise ValueError('Length must be a positive.' )
return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def snake_case__ ( SCREAMING_SNAKE_CASE_ : float ):
'''simple docstring'''
if edge <= 0 or not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise ValueError('Length must be a positive.' )
return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 216 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
lowerCAmelCase__ = {
'''configuration_audio_spectrogram_transformer''': [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ASTConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ASTForAudioClassification''',
'''ASTModel''',
'''ASTPreTrainedModel''',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''ASTFeatureExtractor''']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 130 |
import os
# Precomputes a list of the 100 first triangular numbers
lowerCAmelCase__ = [int(0.5 * n * (n + 1)) for n in range(1, 1_0_1)]
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : str = os.path.dirname(os.path.realpath(lowerCamelCase__ ) )
lowercase__ : Optional[Any] = os.path.join(lowerCamelCase__ , "words.txt" )
lowercase__ : int = ""
with open(lowerCamelCase__ ) as f:
lowercase__ : Any = f.readline()
lowercase__ : Any = [word.strip("\"" ) for word in words.strip("\r\n" ).split("," )]
lowercase__ : str = [
word
for word in [sum(ord(lowerCamelCase__ ) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(lowerCamelCase__ )
if __name__ == "__main__":
print(solution())
| 130 | 1 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list , _UpperCamelCase : list ) -> float:
'''simple docstring'''
_validate_point(_UpperCamelCase )
_validate_point(_UpperCamelCase )
if len(_UpperCamelCase ) != len(_UpperCamelCase ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(a - b ) for a, b in zip(_UpperCamelCase , _UpperCamelCase ) ) )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list[float] ) -> None:
'''simple docstring'''
if point:
if isinstance(_UpperCamelCase , _UpperCamelCase ):
for item in point:
if not isinstance(_UpperCamelCase , (int, float) ):
UpperCamelCase__ = (
"Expected a list of numbers as input, found "
F'{type(_UpperCamelCase ).__name__}'
)
raise TypeError(_UpperCamelCase )
else:
UpperCamelCase__ = F'Expected a list of numbers as input, found {type(_UpperCamelCase ).__name__}'
raise TypeError(_UpperCamelCase )
else:
raise ValueError("Missing an input" )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list , _UpperCamelCase : list ) -> float:
'''simple docstring'''
_validate_point(_UpperCamelCase )
_validate_point(_UpperCamelCase )
if len(_UpperCamelCase ) != len(_UpperCamelCase ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(x - y ) for x, y in zip(_UpperCamelCase , _UpperCamelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 31 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list ) -> float:
'''simple docstring'''
UpperCamelCase__ = 0
while len(_UpperCamelCase ) > 1:
UpperCamelCase__ = 0
# Consider two files with minimum cost to be merged
for _ in range(2 ):
UpperCamelCase__ = files.index(min(_UpperCamelCase ) )
temp += files[min_index]
files.pop(_UpperCamelCase )
files.append(_UpperCamelCase )
optimal_merge_cost += temp
return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod() | 31 | 1 |
'''simple docstring'''
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
snake_case_ : Any = logging.getLogger(__name__)
snake_case_ : Optional[Any] = {'facebook/bart-base': BartForConditionalGeneration}
snake_case_ : Optional[int] = {'facebook/bart-base': BartTokenizer}
def A__ ( ):
_UpperCamelCase : List[Any] = argparse.ArgumentParser(description='Export Bart model + Beam Search to ONNX graph.' )
parser.add_argument(
'--validation_file' , type=UpperCAmelCase_ , default=UpperCAmelCase_ , help='A csv or a json file containing the validation data.' )
parser.add_argument(
'--max_length' , type=UpperCAmelCase_ , default=5 , help='The maximum total input sequence length after tokenization.' , )
parser.add_argument(
'--num_beams' , type=UpperCAmelCase_ , default=UpperCAmelCase_ , help=(
'Number of beams to use for evaluation. This argument will be '
'passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'
) , )
parser.add_argument(
'--model_name_or_path' , type=UpperCAmelCase_ , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=UpperCAmelCase_ , )
parser.add_argument(
'--config_name' , type=UpperCAmelCase_ , default=UpperCAmelCase_ , help='Pretrained config name or path if not the same as model_name' , )
parser.add_argument(
'--device' , type=UpperCAmelCase_ , default='cpu' , help='Device where the model will be run' , )
parser.add_argument('--output_file_path' , type=UpperCAmelCase_ , default=UpperCAmelCase_ , help='Where to store the final ONNX file.' )
_UpperCamelCase : Tuple = parser.parse_args()
return args
def A__ ( UpperCAmelCase_ , UpperCAmelCase_="cpu" ):
_UpperCamelCase : Any = model_dict[model_name].from_pretrained(UpperCAmelCase_ ).to(UpperCAmelCase_ )
_UpperCamelCase : Union[str, Any] = tokenizer_dict[model_name].from_pretrained(UpperCAmelCase_ )
if model_name in ["facebook/bart-base"]:
_UpperCamelCase : Any = 0
_UpperCamelCase : int = None
_UpperCamelCase : int = 0
return huggingface_model, tokenizer
def A__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
model.eval()
_UpperCamelCase : str = None
_UpperCamelCase : str = torch.jit.script(BARTBeamSearchGenerator(UpperCAmelCase_ ) )
with torch.no_grad():
_UpperCamelCase : Optional[Any] = 'My friends are cool but they eat too many carbs.'
_UpperCamelCase : Dict = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1_0_2_4 , return_tensors='pt' ).to(model.device )
_UpperCamelCase : List[Any] = model.generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , num_beams=UpperCAmelCase_ , max_length=UpperCAmelCase_ , early_stopping=UpperCAmelCase_ , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
UpperCAmelCase_ , (
inputs['input_ids'],
inputs['attention_mask'],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , UpperCAmelCase_ , opset_version=1_4 , input_names=['input_ids', 'attention_mask', 'num_beams', 'max_length', 'decoder_start_token_id'] , output_names=['output_ids'] , dynamic_axes={
'input_ids': {0: 'batch', 1: 'seq'},
'output_ids': {0: 'batch', 1: 'seq_out'},
} , example_outputs=UpperCAmelCase_ , )
logger.info('Model exported to {}'.format(UpperCAmelCase_ ) )
_UpperCamelCase : List[Any] = remove_dup_initializers(os.path.abspath(UpperCAmelCase_ ) )
logger.info('Deduplicated and optimized model written to {}'.format(UpperCAmelCase_ ) )
_UpperCamelCase : Tuple = onnxruntime.InferenceSession(UpperCAmelCase_ )
_UpperCamelCase : Dict = ort_sess.run(
UpperCAmelCase_ , {
'input_ids': inputs['input_ids'].cpu().numpy(),
'attention_mask': inputs['attention_mask'].cpu().numpy(),
'num_beams': np.array(UpperCAmelCase_ ),
'max_length': np.array(UpperCAmelCase_ ),
'decoder_start_token_id': np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 )
logger.info('Model outputs from torch and ONNX Runtime are similar.' )
logger.info('Success.' )
def A__ ( ):
_UpperCamelCase : Any = parse_args()
_UpperCamelCase : Optional[int] = 5
_UpperCamelCase : List[str] = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
_UpperCamelCase : Union[str, Any] = torch.device(args.device )
_UpperCamelCase , _UpperCamelCase : Tuple = load_model_tokenizer(args.model_name_or_path , UpperCAmelCase_ )
if model.config.decoder_start_token_id is None:
raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined' )
model.to(UpperCAmelCase_ )
if args.max_length:
_UpperCamelCase : Optional[int] = args.max_length
if args.num_beams:
_UpperCamelCase : Any = args.num_beams
if args.output_file_path:
_UpperCamelCase : str = args.output_file_path
else:
_UpperCamelCase : List[Any] = 'BART.onnx'
logger.info('Exporting model to ONNX' )
export_and_validate_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
if __name__ == "__main__":
main()
| 83 |
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : int = logging.get_logger(__name__)
snake_case_ : Optional[Any] = {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json',
}
class lowercase__ ( lowercase ):
lowercase__ = """mvp"""
lowercase__ = ["""past_key_values"""]
lowercase__ = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : List[Any] ,lowerCamelCase__ : Any=50267 ,lowerCamelCase__ : Optional[int]=1024 ,lowerCamelCase__ : int=12 ,lowerCamelCase__ : Tuple=4096 ,lowerCamelCase__ : Union[str, Any]=16 ,lowerCamelCase__ : List[Any]=12 ,lowerCamelCase__ : Tuple=4096 ,lowerCamelCase__ : Any=16 ,lowerCamelCase__ : Optional[int]=0.0 ,lowerCamelCase__ : Optional[int]=0.0 ,lowerCamelCase__ : str="gelu" ,lowerCamelCase__ : Optional[int]=1024 ,lowerCamelCase__ : Tuple=0.1 ,lowerCamelCase__ : List[str]=0.0 ,lowerCamelCase__ : Union[str, Any]=0.0 ,lowerCamelCase__ : Union[str, Any]=0.0_2 ,lowerCamelCase__ : Union[str, Any]=0.0 ,lowerCamelCase__ : Tuple=False ,lowerCamelCase__ : Union[str, Any]=True ,lowerCamelCase__ : str=1 ,lowerCamelCase__ : Any=0 ,lowerCamelCase__ : Optional[int]=2 ,lowerCamelCase__ : Any=True ,lowerCamelCase__ : Dict=2 ,lowerCamelCase__ : Optional[int]=2 ,lowerCamelCase__ : Optional[int]=False ,lowerCamelCase__ : Tuple=100 ,lowerCamelCase__ : Optional[int]=800 ,**lowerCamelCase__ : int ,):
'''simple docstring'''
_UpperCamelCase : Optional[int] = vocab_size
_UpperCamelCase : Union[str, Any] = max_position_embeddings
_UpperCamelCase : Dict = d_model
_UpperCamelCase : Any = encoder_ffn_dim
_UpperCamelCase : Dict = encoder_layers
_UpperCamelCase : Optional[Any] = encoder_attention_heads
_UpperCamelCase : Optional[int] = decoder_ffn_dim
_UpperCamelCase : str = decoder_layers
_UpperCamelCase : int = decoder_attention_heads
_UpperCamelCase : str = dropout
_UpperCamelCase : str = attention_dropout
_UpperCamelCase : List[Any] = activation_dropout
_UpperCamelCase : Dict = activation_function
_UpperCamelCase : List[str] = init_std
_UpperCamelCase : Dict = encoder_layerdrop
_UpperCamelCase : Tuple = decoder_layerdrop
_UpperCamelCase : Optional[int] = classifier_dropout
_UpperCamelCase : str = use_cache
_UpperCamelCase : Union[str, Any] = encoder_layers
_UpperCamelCase : Any = scale_embedding # scale factor will be sqrt(d_model) if True
_UpperCamelCase : Any = use_prompt
_UpperCamelCase : Optional[int] = prompt_length
_UpperCamelCase : Any = prompt_mid_dim
super().__init__(
pad_token_id=lowerCamelCase__ ,bos_token_id=lowerCamelCase__ ,eos_token_id=lowerCamelCase__ ,is_encoder_decoder=lowerCamelCase__ ,decoder_start_token_id=lowerCamelCase__ ,forced_eos_token_id=lowerCamelCase__ ,**lowerCamelCase__ ,)
if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated' ,lowerCamelCase__ ):
_UpperCamelCase : Union[str, Any] = self.bos_token_id
warnings.warn(
F'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
'The config can simply be saved and uploaded again to be fixed.' )
| 83 | 1 |
"""simple docstring"""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
a_ : Union[str, Any] = logging.get_logger(__name__)
class _snake_case ( A__ ):
_lowercase : List[str] = ['''pixel_values''']
def __init__( self , a = True , a = 1 / 255 , a = True , a = 8 , **a , ) -> None:
super().__init__(**a)
SCREAMING_SNAKE_CASE = do_rescale
SCREAMING_SNAKE_CASE = rescale_factor
SCREAMING_SNAKE_CASE = do_pad
SCREAMING_SNAKE_CASE = pad_size
def SCREAMING_SNAKE_CASE__ ( self , a , a , a = None , **a) -> np.ndarray:
return rescale(a , scale=a , data_format=a , **a)
def SCREAMING_SNAKE_CASE__ ( self , a , a , a = None) -> List[str]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_image_size(a)
SCREAMING_SNAKE_CASE = (old_height // size + 1) * size - old_height
SCREAMING_SNAKE_CASE = (old_width // size + 1) * size - old_width
return pad(a , ((0, pad_height), (0, pad_width)) , mode='symmetric' , data_format=a)
def SCREAMING_SNAKE_CASE__ ( self , a , a = None , a = None , a = None , a = None , a = None , a = ChannelDimension.FIRST , **a , ) -> List[str]:
SCREAMING_SNAKE_CASE = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE = do_pad if do_pad is not None else self.do_pad
SCREAMING_SNAKE_CASE = pad_size if pad_size is not None else self.pad_size
SCREAMING_SNAKE_CASE = make_list_of_images(a)
if not valid_images(a):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE = [to_numpy_array(a) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE = [self.rescale(image=a , scale=a) for image in images]
if do_pad:
SCREAMING_SNAKE_CASE = [self.pad(a , size=a) for image in images]
SCREAMING_SNAKE_CASE = [to_channel_dimension_format(a , a) for image in images]
SCREAMING_SNAKE_CASE = {'pixel_values': images}
return BatchFeature(data=a , tensor_type=a)
| 360 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
a_ : Union[str, Any] = logging.get_logger(__name__)
class _snake_case ( A__ ):
_lowercase : List[str] = ['''pixel_values''']
def __init__( self , a = True , a = 1 / 255 , a = True , a = 8 , **a , ) -> None:
super().__init__(**a)
SCREAMING_SNAKE_CASE = do_rescale
SCREAMING_SNAKE_CASE = rescale_factor
SCREAMING_SNAKE_CASE = do_pad
SCREAMING_SNAKE_CASE = pad_size
def SCREAMING_SNAKE_CASE__ ( self , a , a , a = None , **a) -> np.ndarray:
return rescale(a , scale=a , data_format=a , **a)
def SCREAMING_SNAKE_CASE__ ( self , a , a , a = None) -> List[str]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_image_size(a)
SCREAMING_SNAKE_CASE = (old_height // size + 1) * size - old_height
SCREAMING_SNAKE_CASE = (old_width // size + 1) * size - old_width
return pad(a , ((0, pad_height), (0, pad_width)) , mode='symmetric' , data_format=a)
def SCREAMING_SNAKE_CASE__ ( self , a , a = None , a = None , a = None , a = None , a = None , a = ChannelDimension.FIRST , **a , ) -> List[str]:
SCREAMING_SNAKE_CASE = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE = do_pad if do_pad is not None else self.do_pad
SCREAMING_SNAKE_CASE = pad_size if pad_size is not None else self.pad_size
SCREAMING_SNAKE_CASE = make_list_of_images(a)
if not valid_images(a):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE = [to_numpy_array(a) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE = [self.rescale(image=a , scale=a) for image in images]
if do_pad:
SCREAMING_SNAKE_CASE = [self.pad(a , size=a) for image in images]
SCREAMING_SNAKE_CASE = [to_channel_dimension_format(a , a) for image in images]
SCREAMING_SNAKE_CASE = {'pixel_values': images}
return BatchFeature(data=a , tensor_type=a)
| 327 | 0 |
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : List[str] ):
lowerCAmelCase__ : str = "ylacombe/bark-small"
lowerCAmelCase__ : Optional[int] = tempfile.mkdtemp()
lowerCAmelCase__ : str = "en_speaker_1"
lowerCAmelCase__ : List[str] = "This is a test string"
lowerCAmelCase__ : Dict = "speaker_embeddings_path.json"
lowerCAmelCase__ : List[str] = "speaker_embeddings"
def __lowerCAmelCase ( self : Optional[int] ,**lowercase_ : Union[str, Any] ):
return AutoTokenizer.from_pretrained(self.checkpoint ,**lowercase_ )
def __lowerCAmelCase ( self : List[str] ):
shutil.rmtree(self.tmpdirname )
def __lowerCAmelCase ( self : List[str] ):
lowerCAmelCase__ : List[Any] = self.get_tokenizer()
lowerCAmelCase__ : Optional[int] = BarkProcessor(tokenizer=lowercase_ )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase__ : List[Any] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() )
@slow
def __lowerCAmelCase ( self : Tuple ):
lowerCAmelCase__ : Dict = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,)
processor.save_pretrained(
self.tmpdirname ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,speaker_embeddings_directory=self.speaker_embeddings_directory ,)
lowerCAmelCase__ : str = self.get_tokenizer(bos_token='''(BOS)''' ,eos_token='''(EOS)''' )
lowerCAmelCase__ : Any = BarkProcessor.from_pretrained(
self.tmpdirname ,self.speaker_embeddings_dict_path ,bos_token='''(BOS)''' ,eos_token='''(EOS)''' ,)
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
def __lowerCAmelCase ( self : List[Any] ):
lowerCAmelCase__ : int = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,)
lowerCAmelCase__ : List[Any] = 3_5
lowerCAmelCase__ : Any = 2
lowerCAmelCase__ : List[str] = 8
lowerCAmelCase__ : int = {
"semantic_prompt": np.ones(lowercase_ ),
"coarse_prompt": np.ones((nb_codebooks_coarse, seq_len) ),
"fine_prompt": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
lowerCAmelCase__ : List[str] = processor(text=self.input_string ,voice_preset=lowercase_ )
lowerCAmelCase__ : Union[str, Any] = inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() ,processed_voice_preset.get(lowercase_ ,np.array([] ) ).tolist() )
# test loading voice preset from npz file
lowerCAmelCase__ : Tuple = os.path.join(self.tmpdirname ,'''file.npz''' )
np.savez(lowercase_ ,**lowercase_ )
lowerCAmelCase__ : Tuple = processor(text=self.input_string ,voice_preset=lowercase_ )
lowerCAmelCase__ : Dict = inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() ,processed_voice_preset.get(lowercase_ ,np.array([] ) ).tolist() )
# test loading voice preset from the hub
lowerCAmelCase__ : Any = processor(text=self.input_string ,voice_preset=self.voice_preset )
def __lowerCAmelCase ( self : Optional[int] ):
lowerCAmelCase__ : Tuple = self.get_tokenizer()
lowerCAmelCase__ : Optional[int] = BarkProcessor(tokenizer=lowercase_ )
lowerCAmelCase__ : Dict = processor(text=self.input_string )
lowerCAmelCase__ : int = tokenizer(
self.input_string ,padding='''max_length''' ,max_length=2_5_6 ,add_special_tokens=lowercase_ ,return_attention_mask=lowercase_ ,return_token_type_ids=lowercase_ ,)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key].squeeze().tolist() )
| 106 | from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class snake_case_ ( __A ):
__A : List[str] = "convbert"
def __init__( self : Union[str, Any] , lowercase_ : str=3_05_22 , lowercase_ : Any=7_68 , lowercase_ : Tuple=12 , lowercase_ : List[str]=12 , lowercase_ : Optional[int]=30_72 , lowercase_ : Union[str, Any]="gelu" , lowercase_ : str=0.1 , lowercase_ : List[str]=0.1 , lowercase_ : Optional[Any]=5_12 , lowercase_ : Dict=2 , lowercase_ : Union[str, Any]=0.02 , lowercase_ : Optional[Any]=1E-12 , lowercase_ : Optional[int]=1 , lowercase_ : List[Any]=0 , lowercase_ : Optional[int]=2 , lowercase_ : str=7_68 , lowercase_ : Dict=2 , lowercase_ : Optional[Any]=9 , lowercase_ : Union[str, Any]=1 , lowercase_ : Any=None , **lowercase_ : Optional[Any] , ) -> Dict:
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ , )
lowercase__ : List[str] = vocab_size
lowercase__ : Union[str, Any] = hidden_size
lowercase__ : Any = num_hidden_layers
lowercase__ : List[str] = num_attention_heads
lowercase__ : Union[str, Any] = intermediate_size
lowercase__ : Optional[Any] = hidden_act
lowercase__ : int = hidden_dropout_prob
lowercase__ : str = attention_probs_dropout_prob
lowercase__ : Union[str, Any] = max_position_embeddings
lowercase__ : Optional[int] = type_vocab_size
lowercase__ : Tuple = initializer_range
lowercase__ : List[str] = layer_norm_eps
lowercase__ : List[Any] = embedding_size
lowercase__ : Optional[Any] = head_ratio
lowercase__ : Dict = conv_kernel_size
lowercase__ : Tuple = num_groups
lowercase__ : Optional[int] = classifier_dropout
class snake_case_ ( __A ):
@property
def __UpperCamelCase ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
lowercase__ : Tuple = {0: "batch", 1: "choice", 2: "sequence"}
else:
lowercase__ : str = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
| 87 | 0 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
A = logging.get_logger(__name__)
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
warnings.warn(
'''The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use PoolFormerImageProcessor instead.''' , _UpperCAmelCase , )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase ) | 369 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''openai/whisper-base'''
__lowerCAmelCase = (
'''This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the '''
'''transcribed text.'''
)
__lowerCAmelCase = '''transcriber'''
__lowerCAmelCase = WhisperProcessor
__lowerCAmelCase = WhisperForConditionalGeneration
__lowerCAmelCase = ['''audio''']
__lowerCAmelCase = ['''text''']
def _lowerCamelCase ( self , _UpperCAmelCase ):
return self.pre_processor(_UpperCAmelCase , return_tensors='''pt''' ).input_features
def _lowerCamelCase ( self , _UpperCAmelCase ):
return self.model.generate(inputs=_UpperCAmelCase )
def _lowerCamelCase ( self , _UpperCAmelCase ):
return self.pre_processor.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )[0] | 188 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.